aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2010-03-02 10:55:08 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2010-03-02 10:55:08 -0500
commit6d6b89bd2e316b78d668f761d380837b81fa71ef (patch)
tree7e63c58611fc6181153526abbdafdd846ed1a19d /drivers/net
parent13dda80e48439b446d0bc9bab34b91484bc8f533 (diff)
parent2507c05ff55fbf38326b08ed27eaed233bc75042 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1341 commits) virtio_net: remove forgotten assignment be2net: fix tx completion polling sis190: fix cable detect via link status poll net: fix protocol sk_buff field bridge: Fix build error when IGMP_SNOOPING is not enabled bnx2x: Tx barriers and locks scm: Only support SCM_RIGHTS on unix domain sockets. vhost-net: restart tx poll on sk_sndbuf full vhost: fix get_user_pages_fast error handling vhost: initialize log eventfd context pointer vhost: logging thinko fix wireless: convert to use netdev_for_each_mc_addr ethtool: do not set some flags, if others failed ipoib: returned back addrlen check for mc addresses netlink: Adding inode field to /proc/net/netlink axnet_cs: add new id bridge: Make IGMP snooping depend upon BRIDGE. bridge: Add multicast count/interval sysfs entries bridge: Add hash elasticity/max sysfs entries bridge: Add multicast_snooping sysfs toggle ... Trivial conflicts in Documentation/feature-removal-schedule.txt
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/3c501.c2
-rw-r--r--drivers/net/3c505.c13
-rw-r--r--drivers/net/3c509.c10
-rw-r--r--drivers/net/3c515.c2
-rw-r--r--drivers/net/3c523.c13
-rw-r--r--drivers/net/3c527.c17
-rw-r--r--drivers/net/3c59x.c4
-rw-r--r--drivers/net/7990.c6
-rw-r--r--drivers/net/8139cp.c81
-rw-r--r--drivers/net/8139too.c196
-rw-r--r--drivers/net/82596.c15
-rw-r--r--drivers/net/Kconfig82
-rw-r--r--drivers/net/Makefile5
-rw-r--r--drivers/net/a2065.c6
-rw-r--r--drivers/net/acenic.c4
-rw-r--r--drivers/net/amd8111e.c21
-rw-r--r--drivers/net/amd8111e.h1
-rw-r--r--drivers/net/appletalk/ltpc.c1
-rw-r--r--drivers/net/arcnet/com20020-pci.c2
-rw-r--r--drivers/net/ariadne.c4
-rw-r--r--drivers/net/arm/am79c961a.c12
-rw-r--r--drivers/net/arm/at91_ether.c9
-rw-r--r--drivers/net/arm/ep93xx_eth.c140
-rw-r--r--drivers/net/arm/ether3.c2
-rw-r--r--drivers/net/arm/ixp4xx_eth.c13
-rw-r--r--drivers/net/arm/ks8695net.c23
-rw-r--r--drivers/net/arm/w90p910_ether.c8
-rw-r--r--drivers/net/at1700.c8
-rw-r--r--drivers/net/atarilance.c2
-rw-r--r--drivers/net/atl1c/atl1c.h11
-rw-r--r--drivers/net/atl1c/atl1c_ethtool.c2
-rw-r--r--drivers/net/atl1c/atl1c_hw.c83
-rw-r--r--drivers/net/atl1c/atl1c_hw.h5
-rw-r--r--drivers/net/atl1c/atl1c_main.c126
-rw-r--r--drivers/net/atl1e/atl1e_hw.c23
-rw-r--r--drivers/net/atl1e/atl1e_main.c160
-rw-r--r--drivers/net/atl1e/atl1e_param.c35
-rw-r--r--drivers/net/atlx/atl1.c2
-rw-r--r--drivers/net/atlx/atl2.c4
-rw-r--r--drivers/net/atlx/atlx.c2
-rw-r--r--drivers/net/atp.c9
-rw-r--r--drivers/net/au1000_eth.c7
-rw-r--r--drivers/net/b44.c94
-rw-r--r--drivers/net/bcm63xx_enet.c15
-rw-r--r--drivers/net/benet/Kconfig4
-rw-r--r--drivers/net/benet/be.h19
-rw-r--r--drivers/net/benet/be_cmds.c118
-rw-r--r--drivers/net/benet/be_cmds.h29
-rw-r--r--drivers/net/benet/be_ethtool.c65
-rw-r--r--drivers/net/benet/be_hw.h121
-rw-r--r--drivers/net/benet/be_main.c540
-rw-r--r--drivers/net/bfin_mac.c8
-rw-r--r--drivers/net/bmac.c13
-rw-r--r--drivers/net/bnx2.c410
-rw-r--r--drivers/net/bnx2.h3
-rw-r--r--drivers/net/bnx2x.h53
-rw-r--r--drivers/net/bnx2x_fw_defs.h7
-rw-r--r--drivers/net/bnx2x_hsi.h10
-rw-r--r--drivers/net/bnx2x_init_ops.h13
-rw-r--r--drivers/net/bnx2x_link.c21
-rw-r--r--drivers/net/bnx2x_main.c284
-rw-r--r--drivers/net/bonding/bond_main.c27
-rw-r--r--drivers/net/bonding/bonding.h1
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/bfin_can.c4
-rw-r--r--drivers/net/can/dev.c8
-rw-r--r--drivers/net/can/mcp251x.c426
-rw-r--r--drivers/net/can/mscan/Kconfig7
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c248
-rw-r--r--drivers/net/can/mscan/mscan.c58
-rw-r--r--drivers/net/can/mscan/mscan.h86
-rw-r--r--drivers/net/can/sja1000/Kconfig12
-rw-r--r--drivers/net/can/sja1000/Makefile1
-rw-r--r--drivers/net/can/sja1000/ems_pci.c2
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c2
-rw-r--r--drivers/net/can/sja1000/plx_pci.c472
-rw-r--r--drivers/net/can/sja1000/sja1000.c27
-rw-r--r--drivers/net/can/ti_hecc.c73
-rw-r--r--drivers/net/can/usb/Kconfig2
-rw-r--r--drivers/net/can/usb/ems_usb.c6
-rw-r--r--drivers/net/can/vcan.c12
-rw-r--r--drivers/net/cassini.c437
-rw-r--r--drivers/net/chelsio/common.h44
-rw-r--r--drivers/net/chelsio/cxgb2.c20
-rw-r--r--drivers/net/chelsio/espi.c4
-rw-r--r--drivers/net/chelsio/pm3393.c22
-rw-r--r--drivers/net/chelsio/sge.c12
-rw-r--r--drivers/net/chelsio/subr.c34
-rw-r--r--drivers/net/chelsio/vsc7326.c24
-rw-r--r--drivers/net/cnic.c206
-rw-r--r--drivers/net/cnic.h13
-rw-r--r--drivers/net/cnic_defs.h2
-rw-r--r--drivers/net/cnic_if.h6
-rw-r--r--drivers/net/cpmac.c4
-rw-r--r--drivers/net/cris/eth_v10.c8
-rw-r--r--drivers/net/cs89x0.c2
-rw-r--r--drivers/net/cxgb3/common.h28
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c10
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/cxgb3/sge.c16
-rw-r--r--drivers/net/cxgb3/t3_hw.c3
-rw-r--r--drivers/net/cxgb3/xgmac.c18
-rw-r--r--drivers/net/davinci_emac.c12
-rw-r--r--drivers/net/de620.c2
-rw-r--r--drivers/net/declance.c6
-rw-r--r--drivers/net/defxx.c24
-rw-r--r--drivers/net/depca.c5
-rw-r--r--drivers/net/dl2k.c9
-rw-r--r--drivers/net/dl2k.h2
-rw-r--r--drivers/net/dm9000.c5
-rw-r--r--drivers/net/e100.c18
-rw-r--r--drivers/net/e1000/e1000.h1
-rw-r--r--drivers/net/e1000/e1000_ethtool.c19
-rw-r--r--drivers/net/e1000/e1000_main.c46
-rw-r--r--drivers/net/e1000e/82571.c68
-rw-r--r--drivers/net/e1000e/defines.h2
-rw-r--r--drivers/net/e1000e/e1000.h19
-rw-r--r--drivers/net/e1000e/es2lan.c32
-rw-r--r--drivers/net/e1000e/ethtool.c2
-rw-r--r--drivers/net/e1000e/hw.h12
-rw-r--r--drivers/net/e1000e/ich8lan.c1
-rw-r--r--drivers/net/e1000e/lib.c230
-rw-r--r--drivers/net/e1000e/netdev.c45
-rw-r--r--drivers/net/eepro.c21
-rw-r--r--drivers/net/eexpress.c22
-rw-r--r--drivers/net/ehea/ehea_main.c9
-rw-r--r--drivers/net/enc28j60.c2
-rw-r--r--drivers/net/enic/enic.h5
-rw-r--r--drivers/net/enic/enic_main.c208
-rw-r--r--drivers/net/enic/enic_res.c16
-rw-r--r--drivers/net/enic/vnic_dev.c1
-rw-r--r--drivers/net/enic/vnic_enet.h5
-rw-r--r--drivers/net/enic/vnic_intr.c8
-rw-r--r--drivers/net/enic/vnic_intr.h3
-rw-r--r--drivers/net/enic/vnic_nic.h12
-rw-r--r--drivers/net/epic100.c9
-rw-r--r--drivers/net/eth16i.c2
-rw-r--r--drivers/net/ethoc.c14
-rw-r--r--drivers/net/ewrk3.c5
-rw-r--r--drivers/net/fealnx.c8
-rw-r--r--drivers/net/fec.c84
-rw-r--r--drivers/net/fec_mpc52xx.c5
-rw-r--r--drivers/net/forcedeth.c8
-rw-r--r--drivers/net/fs_enet/Kconfig10
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c93
-rw-r--r--drivers/net/fs_enet/fs_enet.h49
-rw-r--r--drivers/net/fs_enet/mac-fcc.c9
-rw-r--r--drivers/net/fs_enet/mac-fec.c62
-rw-r--r--drivers/net/fs_enet/mac-scc.c13
-rw-r--r--drivers/net/fs_enet/mii-fec.c4
-rw-r--r--drivers/net/gianfar.c4
-rw-r--r--drivers/net/greth.c1634
-rw-r--r--drivers/net/greth.h143
-rw-r--r--drivers/net/hamachi.c13
-rw-r--r--drivers/net/hp100.c13
-rw-r--r--drivers/net/ibm_newemac/core.c8
-rw-r--r--drivers/net/ibmlana.c2
-rw-r--r--drivers/net/ibmveth.c8
-rw-r--r--drivers/net/igb/e1000_82575.c65
-rw-r--r--drivers/net/igb/e1000_82575.h5
-rw-r--r--drivers/net/igb/e1000_defines.h7
-rw-r--r--drivers/net/igb/e1000_hw.h7
-rw-r--r--drivers/net/igb/e1000_mac.c70
-rw-r--r--drivers/net/igb/e1000_mac.h2
-rw-r--r--drivers/net/igb/e1000_phy.c35
-rw-r--r--drivers/net/igb/e1000_phy.h2
-rw-r--r--drivers/net/igb/e1000_regs.h1
-rw-r--r--drivers/net/igb/igb.h16
-rw-r--r--drivers/net/igb/igb_ethtool.c93
-rw-r--r--drivers/net/igb/igb_main.c414
-rw-r--r--drivers/net/igbvf/netdev.c31
-rw-r--r--drivers/net/ioc3-eth.c11
-rw-r--r--drivers/net/ipg.c13
-rw-r--r--drivers/net/irda/Kconfig10
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/sh_sir.c823
-rw-r--r--drivers/net/irda/via-ircc.c2
-rw-r--r--drivers/net/irda/vlsi_ir.c2
-rw-r--r--drivers/net/isa-skeleton.c718
-rw-r--r--drivers/net/iseries_veth.c8
-rw-r--r--drivers/net/ixgb/ixgb.h11
-rw-r--r--drivers/net/ixgb/ixgb_main.c104
-rw-r--r--drivers/net/ixgbe/Makefile3
-rw-r--r--drivers/net/ixgbe/ixgbe.h54
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c233
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c19
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c195
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c4
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c676
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.c479
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.h96
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c362
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.h47
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h66
-rw-r--r--drivers/net/ixgbevf/Makefile38
-rw-r--r--drivers/net/ixgbevf/defines.h292
-rw-r--r--drivers/net/ixgbevf/ethtool.c716
-rw-r--r--drivers/net/ixgbevf/ixgbevf.h318
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c3578
-rw-r--r--drivers/net/ixgbevf/mbx.c341
-rw-r--r--drivers/net/ixgbevf/mbx.h100
-rw-r--r--drivers/net/ixgbevf/regs.h85
-rw-r--r--drivers/net/ixgbevf/vf.c387
-rw-r--r--drivers/net/ixgbevf/vf.h168
-rw-r--r--drivers/net/jme.c62
-rw-r--r--drivers/net/jme.h41
-rw-r--r--drivers/net/korina.c10
-rw-r--r--drivers/net/ks8851.c7
-rw-r--r--drivers/net/ks8851_mll.c7
-rw-r--r--drivers/net/ksz884x.c7335
-rw-r--r--drivers/net/lance.c2
-rw-r--r--drivers/net/lib82596.c21
-rw-r--r--drivers/net/lib8390.c15
-rw-r--r--drivers/net/ll_temac_main.c25
-rw-r--r--drivers/net/loopback.c16
-rw-r--r--drivers/net/lp486e.c16
-rw-r--r--drivers/net/mac8390.c632
-rw-r--r--drivers/net/mac89x0.c4
-rw-r--r--drivers/net/macb.c38
-rw-r--r--drivers/net/mace.c11
-rw-r--r--drivers/net/macmace.c12
-rw-r--r--drivers/net/macvlan.c117
-rw-r--r--drivers/net/macvtap.c803
-rw-r--r--drivers/net/meth.c3
-rw-r--r--drivers/net/mlx4/en_rx.c8
-rw-r--r--drivers/net/mlx4/main.c2
-rw-r--r--drivers/net/mv643xx_eth.c6
-rw-r--r--drivers/net/myri10ge/myri10ge.c198
-rw-r--r--drivers/net/myri_sbus.c6
-rw-r--r--drivers/net/natsemi.c8
-rw-r--r--drivers/net/ne2k-pci.c2
-rw-r--r--drivers/net/netxen/Makefile2
-rw-r--r--drivers/net/netxen/netxen_nic.h8
-rw-r--r--drivers/net/netxen/netxen_nic_ctx.c2
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c2
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h5
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c46
-rw-r--r--drivers/net/netxen/netxen_nic_hw.h2
-rw-r--r--drivers/net/netxen/netxen_nic_init.c5
-rw-r--r--drivers/net/netxen/netxen_nic_main.c213
-rw-r--r--drivers/net/ni5010.c3
-rw-r--r--drivers/net/ni52.c10
-rw-r--r--drivers/net/ni65.c2
-rw-r--r--drivers/net/niu.c699
-rw-r--r--drivers/net/ns83820.c4
-rw-r--r--drivers/net/octeon/octeon_mgmt.c18
-rw-r--r--drivers/net/pasemi_mac.c2
-rw-r--r--drivers/net/pci-skeleton.c1029
-rw-r--r--drivers/net/pcmcia/3c574_cs.c2
-rw-r--r--drivers/net/pcmcia/3c589_cs.c2
-rw-r--r--drivers/net/pcmcia/axnet_cs.c11
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c10
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c18
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c35
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c88
-rw-r--r--drivers/net/pcnet32.c507
-rw-r--r--drivers/net/phy/broadcom.c5
-rw-r--r--drivers/net/phy/marvell.c38
-rw-r--r--drivers/net/phy/phy_device.c16
-rw-r--r--drivers/net/phy/smsc.c21
-rw-r--r--drivers/net/ppp_generic.c122
-rw-r--r--drivers/net/ps3_gelic_net.c4
-rw-r--r--drivers/net/ps3_gelic_wireless.c149
-rw-r--r--drivers/net/qla3xxx.c3
-rw-r--r--drivers/net/qlcnic/Makefile8
-rw-r--r--drivers/net/qlcnic/qlcnic.h1126
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c534
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c1015
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h937
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c1274
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c1541
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c2720
-rw-r--r--drivers/net/qlge/qlge.h446
-rw-r--r--drivers/net/qlge/qlge_dbg.c1183
-rw-r--r--drivers/net/qlge/qlge_ethtool.c56
-rw-r--r--drivers/net/qlge/qlge_main.c1189
-rw-r--r--drivers/net/qlge/qlge_mpi.c340
-rw-r--r--drivers/net/r6040.c37
-rw-r--r--drivers/net/r8169.c166
-rw-r--r--drivers/net/rrunner.c2
-rw-r--r--drivers/net/s2io.c13
-rw-r--r--drivers/net/sb1250-mac.c6
-rw-r--r--drivers/net/sc92031.c6
-rw-r--r--drivers/net/sfc/efx.c8
-rw-r--r--drivers/net/sfc/efx.h2
-rw-r--r--drivers/net/sfc/ethtool.c10
-rw-r--r--drivers/net/sfc/falcon.c6
-rw-r--r--drivers/net/sfc/mcdi.c109
-rw-r--r--drivers/net/sfc/mcdi.h1
-rw-r--r--drivers/net/sfc/mcdi_pcol.h202
-rw-r--r--drivers/net/sfc/mcdi_phy.c36
-rw-r--r--drivers/net/sfc/mdio_10g.c24
-rw-r--r--drivers/net/sfc/mdio_10g.h3
-rw-r--r--drivers/net/sfc/net_driver.h17
-rw-r--r--drivers/net/sfc/nic.c13
-rw-r--r--drivers/net/sfc/qt202x_phy.c1
-rw-r--r--drivers/net/sfc/selftest.c42
-rw-r--r--drivers/net/sfc/selftest.h4
-rw-r--r--drivers/net/sfc/siena.c16
-rw-r--r--drivers/net/sfc/tenxpress.c2
-rw-r--r--drivers/net/sgiseeq.c2
-rw-r--r--drivers/net/sh_eth.c10
-rw-r--r--drivers/net/sis190.c221
-rw-r--r--drivers/net/sis900.c9
-rw-r--r--drivers/net/skfp/skfddi.c35
-rw-r--r--drivers/net/skge.c218
-rw-r--r--drivers/net/sky2.c726
-rw-r--r--drivers/net/sky2.h10
-rw-r--r--drivers/net/smc911x.c14
-rw-r--r--drivers/net/smc9194.c12
-rw-r--r--drivers/net/smc91x.c11
-rw-r--r--drivers/net/smsc911x.c53
-rw-r--r--drivers/net/smsc9420.c9
-rw-r--r--drivers/net/sonic.c13
-rw-r--r--drivers/net/spider_net.c4
-rw-r--r--drivers/net/starfire.c13
-rw-r--r--drivers/net/stmmac/Kconfig8
-rw-r--r--drivers/net/stmmac/Makefile5
-rw-r--r--drivers/net/stmmac/common.h279
-rw-r--r--drivers/net/stmmac/descs.h4
-rw-r--r--drivers/net/stmmac/dwmac100.c (renamed from drivers/net/stmmac/mac100.c)212
-rw-r--r--drivers/net/stmmac/dwmac100.h (renamed from drivers/net/stmmac/mac100.h)0
-rw-r--r--drivers/net/stmmac/dwmac1000.h (renamed from drivers/net/stmmac/gmac.h)18
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c243
-rw-r--r--drivers/net/stmmac/dwmac1000_dma.c (renamed from drivers/net/stmmac/gmac.c)351
-rw-r--r--drivers/net/stmmac/dwmac_dma.h107
-rw-r--r--drivers/net/stmmac/dwmac_lib.c263
-rw-r--r--drivers/net/stmmac/stmmac.h28
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c11
-rw-r--r--drivers/net/stmmac/stmmac_main.c436
-rw-r--r--drivers/net/stmmac/stmmac_mdio.c11
-rw-r--r--drivers/net/sun3_82586.c10
-rw-r--r--drivers/net/sun3lance.c2
-rw-r--r--drivers/net/sunbmac.c7
-rw-r--r--drivers/net/sundance.c9
-rw-r--r--drivers/net/sungem.c14
-rw-r--r--drivers/net/sunhme.c26
-rw-r--r--drivers/net/sunlance.c6
-rw-r--r--drivers/net/sunqe.c11
-rw-r--r--drivers/net/sunvnet.c7
-rw-r--r--drivers/net/tc35815.c27
-rw-r--r--drivers/net/tehuti.c159
-rw-r--r--drivers/net/tehuti.h30
-rw-r--r--drivers/net/tg3.c965
-rw-r--r--drivers/net/tg3.h162
-rw-r--r--drivers/net/tlan.c37
-rw-r--r--drivers/net/tlan.h3
-rw-r--r--drivers/net/tokenring/3c359.c7
-rw-r--r--drivers/net/tokenring/abyss.c2
-rw-r--r--drivers/net/tokenring/ibmtr.c4
-rw-r--r--drivers/net/tokenring/lanstreamer.c6
-rw-r--r--drivers/net/tokenring/olympic.c7
-rw-r--r--drivers/net/tokenring/tms380tr.c8
-rw-r--r--drivers/net/tokenring/tmspci.c2
-rw-r--r--drivers/net/tsi108_eth.c22
-rw-r--r--drivers/net/tulip/21142.c76
-rw-r--r--drivers/net/tulip/de2104x.c163
-rw-r--r--drivers/net/tulip/de4x5.c16
-rw-r--r--drivers/net/tulip/dmfe.c103
-rw-r--r--drivers/net/tulip/eeprom.c47
-rw-r--r--drivers/net/tulip/interrupt.c100
-rw-r--r--drivers/net/tulip/media.c74
-rw-r--r--drivers/net/tulip/pnic.c33
-rw-r--r--drivers/net/tulip/pnic2.c59
-rw-r--r--drivers/net/tulip/timer.c52
-rw-r--r--drivers/net/tulip/tulip_core.c187
-rw-r--r--drivers/net/tulip/uli526x.c64
-rw-r--r--drivers/net/tulip/winbond-840.c186
-rw-r--r--drivers/net/tulip/xircom_cb.c46
-rw-r--r--drivers/net/tun.c127
-rw-r--r--drivers/net/typhoon.c253
-rw-r--r--drivers/net/ucc_geth.c29
-rw-r--r--drivers/net/usb/asix.c117
-rw-r--r--drivers/net/usb/catc.c9
-rw-r--r--drivers/net/usb/cdc_eem.c10
-rw-r--r--drivers/net/usb/cdc_ether.c22
-rw-r--r--drivers/net/usb/dm9601.c59
-rw-r--r--drivers/net/usb/int51x1.c17
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/mcs7830.c256
-rw-r--r--drivers/net/usb/net1080.c109
-rw-r--r--drivers/net/usb/pegasus.c172
-rw-r--r--drivers/net/usb/rndis_host.c24
-rw-r--r--drivers/net/usb/rtl8150.c9
-rw-r--r--drivers/net/usb/smsc95xx.c245
-rw-r--r--drivers/net/usb/usbnet.c238
-rw-r--r--drivers/net/veth.c19
-rw-r--r--drivers/net/via-rhine.c9
-rw-r--r--drivers/net/via-velocity.c16
-rw-r--r--drivers/net/virtio_net.c474
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c19
-rw-r--r--drivers/net/vxge/vxge-main.c16
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2
-rw-r--r--drivers/net/wan/pc300_drv.c2
-rw-r--r--drivers/net/wan/pc300too.c2
-rw-r--r--drivers/net/wan/pci200syn.c2
-rw-r--r--drivers/net/wan/wanxl.c2
-rw-r--r--drivers/net/wimax/i2400m/driver.c17
-rw-r--r--drivers/net/wimax/i2400m/fw.c11
-rw-r--r--drivers/net/wireless/Kconfig1
-rw-r--r--drivers/net/wireless/adm8211.c27
-rw-r--r--drivers/net/wireless/airo.c39
-rw-r--r--drivers/net/wireless/at76c50x-usb.c6
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h17
-rw-r--r--drivers/net/wireless/ath/ar9170/hw.h1
-rw-r--r--drivers/net/wireless/ath/ar9170/mac.c2
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c193
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c2
-rw-r--r--drivers/net/wireless/ath/ath.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h27
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c108
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c121
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c25
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c31
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h80
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c30
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c199
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h32
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c442
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c178
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c863
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h34
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c1451
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c75
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c27
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c62
-rw-r--r--drivers/net/wireless/ath/debug.h8
-rw-r--r--drivers/net/wireless/ath/regd.c5
-rw-r--r--drivers/net/wireless/atmel_pci.c2
-rw-r--r--drivers/net/wireless/b43/Kconfig6
-rw-r--r--drivers/net/wireless/b43/Makefile2
-rw-r--r--drivers/net/wireless/b43/b43.h20
-rw-r--r--drivers/net/wireless/b43/dma.c19
-rw-r--r--drivers/net/wireless/b43/dma.h5
-rw-r--r--drivers/net/wireless/b43/main.c98
-rw-r--r--drivers/net/wireless/b43/phy_common.c45
-rw-r--r--drivers/net/wireless/b43/phy_common.h10
-rw-r--r--drivers/net/wireless/b43/phy_lp.c76
-rw-r--r--drivers/net/wireless/b43/phy_n.c3035
-rw-r--r--drivers/net/wireless/b43/phy_n.h98
-rw-r--r--drivers/net/wireless/b43/pio.c17
-rw-r--r--drivers/net/wireless/b43/pio.h45
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c744
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h100
-rw-r--r--drivers/net/wireless/b43legacy/dma.c20
-rw-r--r--drivers/net/wireless/b43legacy/dma.h10
-rw-r--r--drivers/net/wireless/b43legacy/leds.h2
-rw-r--r--drivers/net/wireless/b43legacy/main.c61
-rw-r--r--drivers/net/wireless/b43legacy/pio.c13
-rw-r--r--drivers/net/wireless/b43legacy/pio.h11
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c17
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c9
-rw-r--r--drivers/net/wireless/hostap/hostap_pci.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_plx.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig14
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c84
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-fh.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c125
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c116
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c354
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h68
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c388
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h55
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h60
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c1461
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h97
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h44
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c136
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c258
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.c198
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c150
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c77
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c197
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c71
-rw-r--r--drivers/net/wireless/libertas/Kconfig6
-rw-r--r--drivers/net/wireless/libertas/Makefile2
-rw-r--r--drivers/net/wireless/libertas/assoc.c95
-rw-r--r--drivers/net/wireless/libertas/cmd.c22
-rw-r--r--drivers/net/wireless/libertas/cmd.h12
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c21
-rw-r--r--drivers/net/wireless/libertas/defs.h7
-rw-r--r--drivers/net/wireless/libertas/dev.h8
-rw-r--r--drivers/net/wireless/libertas/ethtool.c2
-rw-r--r--drivers/net/wireless/libertas/if_spi.c1
-rw-r--r--drivers/net/wireless/libertas/main.c81
-rw-r--r--drivers/net/wireless/libertas/mesh.c29
-rw-r--r--drivers/net/wireless/libertas/mesh.h32
-rw-r--r--drivers/net/wireless/libertas/scan.c2
-rw-r--r--drivers/net/wireless/libertas/tx.c2
-rw-r--r--drivers/net/wireless/libertas/wext.c26
-rw-r--r--drivers/net/wireless/libertas_tf/main.c13
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c198
-rw-r--r--drivers/net/wireless/mwl8k.c2084
-rw-r--r--drivers/net/wireless/orinoco/hw.c22
-rw-r--r--drivers/net/wireless/orinoco/hw.h2
-rw-r--r--drivers/net/wireless/orinoco/main.c7
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c9
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c2
-rw-r--r--drivers/net/wireless/p54/main.c51
-rw-r--r--drivers/net/wireless/p54/p54.h8
-rw-r--r--drivers/net/wireless/p54/p54pci.c76
-rw-r--r--drivers/net/wireless/p54/p54pci.h6
-rw-r--r--drivers/net/wireless/p54/p54usb.c2
-rw-r--r--drivers/net/wireless/p54/txrx.c4
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c2
-rw-r--r--drivers/net/wireless/ray_cs.c10
-rw-r--r--drivers/net/wireless/rndis_wlan.c388
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig71
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c48
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c39
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h14
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c203
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c104
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c373
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h90
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h96
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c42
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c79
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h5
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.c11
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.h10
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c46
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.h9
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c41
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.h2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180.h1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c37
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187.h2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c26
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_leds.c6
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_leds.h2
-rw-r--r--drivers/net/wireless/wl12xx/Makefile4
-rw-r--r--drivers/net/wireless/wl12xx/wl1251.h4
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.c69
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.h87
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_cmd.c83
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_cmd.h22
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_debugfs.c23
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_init.c5
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_init.h47
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c375
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_ps.c9
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_tx.c9
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_tx.h17
-rw-r--r--drivers/net/wireless/wl12xx/wl1271.h67
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.c196
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.h50
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.c102
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c137
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.h67
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_conf.h174
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_debugfs.c62
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.c68
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.c50
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.h4
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_io.c213
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_io.h68
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c823
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.c37
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.h3
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_reg.h99
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.c11
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.c158
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.h30
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_testmode.c283
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_testmode.h31
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c71
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.h36
-rw-r--r--drivers/net/wireless/zd1201.c14
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c10
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c14
-rw-r--r--drivers/net/xilinx_emaclite.c384
-rw-r--r--drivers/net/yellowfin.c173
-rw-r--r--drivers/net/znet.c3
637 files changed, 56354 insertions, 18432 deletions
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 4d4cad393dce..b6de7b1e2a5c 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -812,7 +812,7 @@ static void set_multicast_list(struct net_device *dev)
812 if (dev->flags & IFF_PROMISC) { 812 if (dev->flags & IFF_PROMISC) {
813 outb(RX_PROM, RX_CMD); 813 outb(RX_PROM, RX_CMD);
814 inb(RX_STATUS); 814 inb(RX_STATUS);
815 } else if (dev->mc_list || dev->flags & IFF_ALLMULTI) { 815 } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
816 /* Multicast or all multicast is the same */ 816 /* Multicast or all multicast is the same */
817 outb(RX_MULT, RX_CMD); 817 outb(RX_MULT, RX_CMD);
818 inb(RX_STATUS); /* Clear status. */ 818 inb(RX_STATUS); /* Clear status. */
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index 9257d7ce0378..04b5bba19021 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -1216,7 +1216,7 @@ static int elp_close(struct net_device *dev)
1216static void elp_set_mc_list(struct net_device *dev) 1216static void elp_set_mc_list(struct net_device *dev)
1217{ 1217{
1218 elp_device *adapter = netdev_priv(dev); 1218 elp_device *adapter = netdev_priv(dev);
1219 struct dev_mc_list *dmi = dev->mc_list; 1219 struct dev_mc_list *dmi;
1220 int i; 1220 int i;
1221 unsigned long flags; 1221 unsigned long flags;
1222 1222
@@ -1229,11 +1229,10 @@ static void elp_set_mc_list(struct net_device *dev)
1229 /* send a "load multicast list" command to the board, max 10 addrs/cmd */ 1229 /* send a "load multicast list" command to the board, max 10 addrs/cmd */
1230 /* if num_addrs==0 the list will be cleared */ 1230 /* if num_addrs==0 the list will be cleared */
1231 adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST; 1231 adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST;
1232 adapter->tx_pcb.length = 6 * dev->mc_count; 1232 adapter->tx_pcb.length = 6 * netdev_mc_count(dev);
1233 for (i = 0; i < dev->mc_count; i++) { 1233 i = 0;
1234 memcpy(adapter->tx_pcb.data.multicast[i], dmi->dmi_addr, 6); 1234 netdev_for_each_mc_addr(dmi, dev)
1235 dmi = dmi->next; 1235 memcpy(adapter->tx_pcb.data.multicast[i++], dmi->dmi_addr, 6);
1236 }
1237 adapter->got[CMD_LOAD_MULTICAST_LIST] = 0; 1236 adapter->got[CMD_LOAD_MULTICAST_LIST] = 0;
1238 if (!send_pcb(dev, &adapter->tx_pcb)) 1237 if (!send_pcb(dev, &adapter->tx_pcb))
1239 pr_err("%s: couldn't send set_multicast command\n", dev->name); 1238 pr_err("%s: couldn't send set_multicast command\n", dev->name);
@@ -1244,7 +1243,7 @@ static void elp_set_mc_list(struct net_device *dev)
1244 TIMEOUT_MSG(__LINE__); 1243 TIMEOUT_MSG(__LINE__);
1245 } 1244 }
1246 } 1245 }
1247 if (dev->mc_count) 1246 if (!netdev_mc_empty(dev))
1248 adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD | RECV_MULTI; 1247 adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD | RECV_MULTI;
1249 else /* num_addrs == 0 */ 1248 else /* num_addrs == 0 */
1250 adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD; 1249 adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 9d85efce5916..902435a76466 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -1111,12 +1111,14 @@ set_multicast_list(struct net_device *dev)
1111 unsigned long flags; 1111 unsigned long flags;
1112 struct el3_private *lp = netdev_priv(dev); 1112 struct el3_private *lp = netdev_priv(dev);
1113 int ioaddr = dev->base_addr; 1113 int ioaddr = dev->base_addr;
1114 int mc_count = netdev_mc_count(dev);
1114 1115
1115 if (el3_debug > 1) { 1116 if (el3_debug > 1) {
1116 static int old; 1117 static int old;
1117 if (old != dev->mc_count) { 1118 if (old != mc_count) {
1118 old = dev->mc_count; 1119 old = mc_count;
1119 pr_debug("%s: Setting Rx mode to %d addresses.\n", dev->name, dev->mc_count); 1120 pr_debug("%s: Setting Rx mode to %d addresses.\n",
1121 dev->name, mc_count);
1120 } 1122 }
1121 } 1123 }
1122 spin_lock_irqsave(&lp->lock, flags); 1124 spin_lock_irqsave(&lp->lock, flags);
@@ -1124,7 +1126,7 @@ set_multicast_list(struct net_device *dev)
1124 outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm, 1126 outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
1125 ioaddr + EL3_CMD); 1127 ioaddr + EL3_CMD);
1126 } 1128 }
1127 else if (dev->mc_count || (dev->flags&IFF_ALLMULTI)) { 1129 else if (mc_count || (dev->flags&IFF_ALLMULTI)) {
1128 outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast, ioaddr + EL3_CMD); 1130 outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast, ioaddr + EL3_CMD);
1129 } 1131 }
1130 else 1132 else
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 063b049ffe55..1e898b1c8068 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -1536,7 +1536,7 @@ static void set_rx_mode(struct net_device *dev)
1536 pr_debug("%s: Setting promiscuous mode.\n", 1536 pr_debug("%s: Setting promiscuous mode.\n",
1537 dev->name); 1537 dev->name);
1538 new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm; 1538 new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm;
1539 } else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) { 1539 } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
1540 new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast; 1540 new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast;
1541 } else 1541 } else
1542 new_mode = SetRxFilter | RxStation | RxBroadcast; 1542 new_mode = SetRxFilter | RxStation | RxBroadcast;
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index 27d80ca5e4c0..beed4fa10c6e 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -625,8 +625,8 @@ static int init586(struct net_device *dev)
625 volatile struct iasetup_cmd_struct *ias_cmd; 625 volatile struct iasetup_cmd_struct *ias_cmd;
626 volatile struct tdr_cmd_struct *tdr_cmd; 626 volatile struct tdr_cmd_struct *tdr_cmd;
627 volatile struct mcsetup_cmd_struct *mc_cmd; 627 volatile struct mcsetup_cmd_struct *mc_cmd;
628 struct dev_mc_list *dmi = dev->mc_list; 628 struct dev_mc_list *dmi;
629 int num_addrs = dev->mc_count; 629 int num_addrs = netdev_mc_count(dev);
630 630
631 ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct)); 631 ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct));
632 632
@@ -771,7 +771,7 @@ static int init586(struct net_device *dev)
771 * Multicast setup 771 * Multicast setup
772 */ 772 */
773 773
774 if (dev->mc_count) { 774 if (num_addrs) {
775 /* I don't understand this: do we really need memory after the init? */ 775 /* I don't understand this: do we really need memory after the init? */
776 int len = ((char *) p->iscp - (char *) ptr - 8) / 6; 776 int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
777 if (len <= 0) { 777 if (len <= 0) {
@@ -787,10 +787,9 @@ static int init586(struct net_device *dev)
787 mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST; 787 mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST;
788 mc_cmd->cmd_link = 0xffff; 788 mc_cmd->cmd_link = 0xffff;
789 mc_cmd->mc_cnt = num_addrs * 6; 789 mc_cmd->mc_cnt = num_addrs * 6;
790 for (i = 0; i < num_addrs; i++) { 790 i = 0;
791 memcpy((char *) mc_cmd->mc_list[i], dmi->dmi_addr, 6); 791 netdev_for_each_mc_addr(dmi, dev)
792 dmi = dmi->next; 792 memcpy((char *) mc_cmd->mc_list[i++], dmi->dmi_addr, 6);
793 }
794 p->scb->cbl_offset = make16(mc_cmd); 793 p->scb->cbl_offset = make16(mc_cmd);
795 p->scb->cmd = CUC_START; 794 p->scb->cmd = CUC_START;
796 elmc_id_attn586(); 795 elmc_id_attn586();
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 36c4191e7bca..5c07b147ec99 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1526,32 +1526,29 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
1526 1526
1527 if ((dev->flags&IFF_PROMISC) || 1527 if ((dev->flags&IFF_PROMISC) ||
1528 (dev->flags&IFF_ALLMULTI) || 1528 (dev->flags&IFF_ALLMULTI) ||
1529 dev->mc_count > 10) 1529 netdev_mc_count(dev) > 10)
1530 /* Enable promiscuous mode */ 1530 /* Enable promiscuous mode */
1531 filt |= 1; 1531 filt |= 1;
1532 else if(dev->mc_count) 1532 else if (!netdev_mc_empty(dev))
1533 { 1533 {
1534 unsigned char block[62]; 1534 unsigned char block[62];
1535 unsigned char *bp; 1535 unsigned char *bp;
1536 struct dev_mc_list *dmc=dev->mc_list; 1536 struct dev_mc_list *dmc;
1537
1538 int i;
1539 1537
1540 if(retry==0) 1538 if(retry==0)
1541 lp->mc_list_valid = 0; 1539 lp->mc_list_valid = 0;
1542 if(!lp->mc_list_valid) 1540 if(!lp->mc_list_valid)
1543 { 1541 {
1544 block[1]=0; 1542 block[1]=0;
1545 block[0]=dev->mc_count; 1543 block[0]=netdev_mc_count(dev);
1546 bp=block+2; 1544 bp=block+2;
1547 1545
1548 for(i=0;i<dev->mc_count;i++) 1546 netdev_for_each_mc_addr(dmc, dev) {
1549 {
1550 memcpy(bp, dmc->dmi_addr, 6); 1547 memcpy(bp, dmc->dmi_addr, 6);
1551 bp+=6; 1548 bp+=6;
1552 dmc=dmc->next;
1553 } 1549 }
1554 if(mc32_command_nowait(dev, 2, block, 2+6*dev->mc_count)==-1) 1550 if(mc32_command_nowait(dev, 2, block,
1551 2+6*netdev_mc_count(dev))==-1)
1555 { 1552 {
1556 lp->mc_reload_wait = 1; 1553 lp->mc_reload_wait = 1;
1557 return; 1554 return;
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 39db0e96815d..f965431f4924 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -375,7 +375,7 @@ static struct vortex_chip_info {
375}; 375};
376 376
377 377
378static struct pci_device_id vortex_pci_tbl[] = { 378static DEFINE_PCI_DEVICE_TABLE(vortex_pci_tbl) = {
379 { 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 }, 379 { 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 },
380 { 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 }, 380 { 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 },
381 { 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 }, 381 { 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 },
@@ -2970,7 +2970,7 @@ static void set_rx_mode(struct net_device *dev)
2970 if (vortex_debug > 3) 2970 if (vortex_debug > 3)
2971 pr_notice("%s: Setting promiscuous mode.\n", dev->name); 2971 pr_notice("%s: Setting promiscuous mode.\n", dev->name);
2972 new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm; 2972 new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
2973 } else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) { 2973 } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
2974 new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast; 2974 new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
2975 } else 2975 } else
2976 new_mode = SetRxFilter | RxStation | RxBroadcast; 2976 new_mode = SetRxFilter | RxStation | RxBroadcast;
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index b1e5764628c6..4e9a5a20b6a6 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -595,9 +595,8 @@ static void lance_load_multicast (struct net_device *dev)
595 struct lance_private *lp = netdev_priv(dev); 595 struct lance_private *lp = netdev_priv(dev);
596 volatile struct lance_init_block *ib = lp->init_block; 596 volatile struct lance_init_block *ib = lp->init_block;
597 volatile u16 *mcast_table = (u16 *)&ib->filter; 597 volatile u16 *mcast_table = (u16 *)&ib->filter;
598 struct dev_mc_list *dmi=dev->mc_list; 598 struct dev_mc_list *dmi;
599 char *addrs; 599 char *addrs;
600 int i;
601 u32 crc; 600 u32 crc;
602 601
603 /* set all multicast bits */ 602 /* set all multicast bits */
@@ -611,9 +610,8 @@ static void lance_load_multicast (struct net_device *dev)
611 ib->filter [1] = 0; 610 ib->filter [1] = 0;
612 611
613 /* Add addresses */ 612 /* Add addresses */
614 for (i = 0; i < dev->mc_count; i++){ 613 netdev_for_each_mc_addr(dmi, dev) {
615 addrs = dmi->dmi_addr; 614 addrs = dmi->dmi_addr;
616 dmi = dmi->next;
617 615
618 /* multicast address? */ 616 /* multicast address? */
619 if (!(*addrs & 1)) 617 if (!(*addrs & 1))
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 3f452bcbfb9e..3d4406b16658 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -46,6 +46,8 @@
46 46
47 */ 47 */
48 48
49#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
50
49#define DRV_NAME "8139cp" 51#define DRV_NAME "8139cp"
50#define DRV_VERSION "1.3" 52#define DRV_VERSION "1.3"
51#define DRV_RELDATE "Mar 22, 2004" 53#define DRV_RELDATE "Mar 22, 2004"
@@ -104,8 +106,6 @@ static int multicast_filter_limit = 32;
104module_param(multicast_filter_limit, int, 0); 106module_param(multicast_filter_limit, int, 0);
105MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses"); 107MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
106 108
107#define PFX DRV_NAME ": "
108
109#define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ 109#define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
110 NETIF_MSG_PROBE | \ 110 NETIF_MSG_PROBE | \
111 NETIF_MSG_LINK) 111 NETIF_MSG_LINK)
@@ -394,7 +394,7 @@ static int cp_get_eeprom(struct net_device *dev,
394static int cp_set_eeprom(struct net_device *dev, 394static int cp_set_eeprom(struct net_device *dev,
395 struct ethtool_eeprom *eeprom, u8 *data); 395 struct ethtool_eeprom *eeprom, u8 *data);
396 396
397static struct pci_device_id cp_pci_tbl[] = { 397static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
398 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), }, 398 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
399 { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), }, 399 { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
400 { }, 400 { },
@@ -470,9 +470,8 @@ static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
470static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail, 470static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
471 u32 status, u32 len) 471 u32 status, u32 len)
472{ 472{
473 if (netif_msg_rx_err (cp)) 473 netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
474 pr_debug("%s: rx err, slot %d status 0x%x len %d\n", 474 rx_tail, status, len);
475 cp->dev->name, rx_tail, status, len);
476 cp->dev->stats.rx_errors++; 475 cp->dev->stats.rx_errors++;
477 if (status & RxErrFrame) 476 if (status & RxErrFrame)
478 cp->dev->stats.rx_frame_errors++; 477 cp->dev->stats.rx_frame_errors++;
@@ -545,9 +544,8 @@ rx_status_loop:
545 goto rx_next; 544 goto rx_next;
546 } 545 }
547 546
548 if (netif_msg_rx_status(cp)) 547 netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
549 pr_debug("%s: rx slot %d status 0x%x len %d\n", 548 rx_tail, status, len);
550 dev->name, rx_tail, status, len);
551 549
552 new_skb = netdev_alloc_skb_ip_align(dev, buflen); 550 new_skb = netdev_alloc_skb_ip_align(dev, buflen);
553 if (!new_skb) { 551 if (!new_skb) {
@@ -621,9 +619,8 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
621 if (!status || (status == 0xFFFF)) 619 if (!status || (status == 0xFFFF))
622 return IRQ_NONE; 620 return IRQ_NONE;
623 621
624 if (netif_msg_intr(cp)) 622 netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
625 pr_debug("%s: intr, status %04x cmd %02x cpcmd %04x\n", 623 status, cpr8(Cmd), cpr16(CpCmd));
626 dev->name, status, cpr8(Cmd), cpr16(CpCmd));
627 624
628 cpw16(IntrStatus, status & ~cp_rx_intr_mask); 625 cpw16(IntrStatus, status & ~cp_rx_intr_mask);
629 626
@@ -654,8 +651,8 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
654 651
655 pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status); 652 pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
656 pci_write_config_word(cp->pdev, PCI_STATUS, pci_status); 653 pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
657 pr_err("%s: PCI bus error, status=%04x, PCI status=%04x\n", 654 netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
658 dev->name, status, pci_status); 655 status, pci_status);
659 656
660 /* TODO: reset hardware */ 657 /* TODO: reset hardware */
661 } 658 }
@@ -700,9 +697,8 @@ static void cp_tx (struct cp_private *cp)
700 697
701 if (status & LastFrag) { 698 if (status & LastFrag) {
702 if (status & (TxError | TxFIFOUnder)) { 699 if (status & (TxError | TxFIFOUnder)) {
703 if (netif_msg_tx_err(cp)) 700 netif_dbg(cp, tx_err, cp->dev,
704 pr_debug("%s: tx err, status 0x%x\n", 701 "tx err, status 0x%x\n", status);
705 cp->dev->name, status);
706 cp->dev->stats.tx_errors++; 702 cp->dev->stats.tx_errors++;
707 if (status & TxOWC) 703 if (status & TxOWC)
708 cp->dev->stats.tx_window_errors++; 704 cp->dev->stats.tx_window_errors++;
@@ -717,8 +713,8 @@ static void cp_tx (struct cp_private *cp)
717 ((status >> TxColCntShift) & TxColCntMask); 713 ((status >> TxColCntShift) & TxColCntMask);
718 cp->dev->stats.tx_packets++; 714 cp->dev->stats.tx_packets++;
719 cp->dev->stats.tx_bytes += skb->len; 715 cp->dev->stats.tx_bytes += skb->len;
720 if (netif_msg_tx_done(cp)) 716 netif_dbg(cp, tx_done, cp->dev,
721 pr_debug("%s: tx done, slot %d\n", cp->dev->name, tx_tail); 717 "tx done, slot %d\n", tx_tail);
722 } 718 }
723 dev_kfree_skb_irq(skb); 719 dev_kfree_skb_irq(skb);
724 } 720 }
@@ -752,8 +748,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
752 if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) { 748 if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
753 netif_stop_queue(dev); 749 netif_stop_queue(dev);
754 spin_unlock_irqrestore(&cp->lock, intr_flags); 750 spin_unlock_irqrestore(&cp->lock, intr_flags);
755 pr_err(PFX "%s: BUG! Tx Ring full when queue awake!\n", 751 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
756 dev->name);
757 return NETDEV_TX_BUSY; 752 return NETDEV_TX_BUSY;
758 } 753 }
759 754
@@ -878,9 +873,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
878 wmb(); 873 wmb();
879 } 874 }
880 cp->tx_head = entry; 875 cp->tx_head = entry;
881 if (netif_msg_tx_queued(cp)) 876 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
882 pr_debug("%s: tx queued, slot %d, skblen %d\n", 877 entry, skb->len);
883 dev->name, entry, skb->len);
884 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) 878 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
885 netif_stop_queue(dev); 879 netif_stop_queue(dev);
886 880
@@ -899,7 +893,7 @@ static void __cp_set_rx_mode (struct net_device *dev)
899{ 893{
900 struct cp_private *cp = netdev_priv(dev); 894 struct cp_private *cp = netdev_priv(dev);
901 u32 mc_filter[2]; /* Multicast hash filter */ 895 u32 mc_filter[2]; /* Multicast hash filter */
902 int i, rx_mode; 896 int rx_mode;
903 u32 tmp; 897 u32 tmp;
904 898
905 /* Note: do not reorder, GCC is clever about common statements. */ 899 /* Note: do not reorder, GCC is clever about common statements. */
@@ -909,7 +903,7 @@ static void __cp_set_rx_mode (struct net_device *dev)
909 AcceptBroadcast | AcceptMulticast | AcceptMyPhys | 903 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
910 AcceptAllPhys; 904 AcceptAllPhys;
911 mc_filter[1] = mc_filter[0] = 0xffffffff; 905 mc_filter[1] = mc_filter[0] = 0xffffffff;
912 } else if ((dev->mc_count > multicast_filter_limit) || 906 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
913 (dev->flags & IFF_ALLMULTI)) { 907 (dev->flags & IFF_ALLMULTI)) {
914 /* Too many to filter perfectly -- accept all multicasts. */ 908 /* Too many to filter perfectly -- accept all multicasts. */
915 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 909 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
@@ -918,8 +912,7 @@ static void __cp_set_rx_mode (struct net_device *dev)
918 struct dev_mc_list *mclist; 912 struct dev_mc_list *mclist;
919 rx_mode = AcceptBroadcast | AcceptMyPhys; 913 rx_mode = AcceptBroadcast | AcceptMyPhys;
920 mc_filter[1] = mc_filter[0] = 0; 914 mc_filter[1] = mc_filter[0] = 0;
921 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 915 netdev_for_each_mc_addr(mclist, dev) {
922 i++, mclist = mclist->next) {
923 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 916 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
924 917
925 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 918 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
@@ -993,7 +986,7 @@ static void cp_reset_hw (struct cp_private *cp)
993 schedule_timeout_uninterruptible(10); 986 schedule_timeout_uninterruptible(10);
994 } 987 }
995 988
996 pr_err("%s: hardware reset timeout\n", cp->dev->name); 989 netdev_err(cp->dev, "hardware reset timeout\n");
997} 990}
998 991
999static inline void cp_start_hw (struct cp_private *cp) 992static inline void cp_start_hw (struct cp_private *cp)
@@ -1160,8 +1153,7 @@ static int cp_open (struct net_device *dev)
1160 struct cp_private *cp = netdev_priv(dev); 1153 struct cp_private *cp = netdev_priv(dev);
1161 int rc; 1154 int rc;
1162 1155
1163 if (netif_msg_ifup(cp)) 1156 netif_dbg(cp, ifup, dev, "enabling interface\n");
1164 pr_debug("%s: enabling interface\n", dev->name);
1165 1157
1166 rc = cp_alloc_rings(cp); 1158 rc = cp_alloc_rings(cp);
1167 if (rc) 1159 if (rc)
@@ -1195,8 +1187,7 @@ static int cp_close (struct net_device *dev)
1195 1187
1196 napi_disable(&cp->napi); 1188 napi_disable(&cp->napi);
1197 1189
1198 if (netif_msg_ifdown(cp)) 1190 netif_dbg(cp, ifdown, dev, "disabling interface\n");
1199 pr_debug("%s: disabling interface\n", dev->name);
1200 1191
1201 spin_lock_irqsave(&cp->lock, flags); 1192 spin_lock_irqsave(&cp->lock, flags);
1202 1193
@@ -1219,9 +1210,9 @@ static void cp_tx_timeout(struct net_device *dev)
1219 unsigned long flags; 1210 unsigned long flags;
1220 int rc; 1211 int rc;
1221 1212
1222 pr_warning("%s: Transmit timeout, status %2x %4x %4x %4x\n", 1213 netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
1223 dev->name, cpr8(Cmd), cpr16(CpCmd), 1214 cpr8(Cmd), cpr16(CpCmd),
1224 cpr16(IntrStatus), cpr16(IntrMask)); 1215 cpr16(IntrStatus), cpr16(IntrMask));
1225 1216
1226 spin_lock_irqsave(&cp->lock, flags); 1217 spin_lock_irqsave(&cp->lock, flags);
1227 1218
@@ -1874,8 +1865,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1874 if (pdev->vendor == PCI_VENDOR_ID_REALTEK && 1865 if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1875 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) { 1866 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1876 dev_info(&pdev->dev, 1867 dev_info(&pdev->dev,
1877 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n", 1868 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1878 pdev->vendor, pdev->device, pdev->revision); 1869 pdev->vendor, pdev->device, pdev->revision);
1879 return -ENODEV; 1870 return -ENODEV;
1880 } 1871 }
1881 1872
@@ -1933,14 +1924,13 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1933 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1924 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1934 if (rc) { 1925 if (rc) {
1935 dev_err(&pdev->dev, 1926 dev_err(&pdev->dev,
1936 "No usable DMA configuration, aborting.\n"); 1927 "No usable DMA configuration, aborting\n");
1937 goto err_out_res; 1928 goto err_out_res;
1938 } 1929 }
1939 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1930 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1940 if (rc) { 1931 if (rc) {
1941 dev_err(&pdev->dev, 1932 dev_err(&pdev->dev,
1942 "No usable consistent DMA configuration, " 1933 "No usable consistent DMA configuration, aborting\n");
1943 "aborting.\n");
1944 goto err_out_res; 1934 goto err_out_res;
1945 } 1935 }
1946 } 1936 }
@@ -1952,7 +1942,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1952 if (!regs) { 1942 if (!regs) {
1953 rc = -EIO; 1943 rc = -EIO;
1954 dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n", 1944 dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1955 (unsigned long long)pci_resource_len(pdev, 1), 1945 (unsigned long long)pci_resource_len(pdev, 1),
1956 (unsigned long long)pciaddr); 1946 (unsigned long long)pciaddr);
1957 goto err_out_res; 1947 goto err_out_res;
1958 } 1948 }
@@ -1990,11 +1980,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1990 if (rc) 1980 if (rc)
1991 goto err_out_iomap; 1981 goto err_out_iomap;
1992 1982
1993 pr_info("%s: RTL-8139C+ at 0x%lx, %pM, IRQ %d\n", 1983 netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
1994 dev->name, 1984 dev->base_addr, dev->dev_addr, dev->irq);
1995 dev->base_addr,
1996 dev->dev_addr,
1997 dev->irq);
1998 1985
1999 pci_set_drvdata(pdev, dev); 1986 pci_set_drvdata(pdev, dev);
2000 1987
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 25f7339daabd..b4efc913978b 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -89,6 +89,8 @@
89 89
90*/ 90*/
91 91
92#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
93
92#define DRV_NAME "8139too" 94#define DRV_NAME "8139too"
93#define DRV_VERSION "0.9.28" 95#define DRV_VERSION "0.9.28"
94 96
@@ -111,7 +113,6 @@
111#include <asm/irq.h> 113#include <asm/irq.h>
112 114
113#define RTL8139_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION 115#define RTL8139_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION
114#define PFX DRV_NAME ": "
115 116
116/* Default Message level */ 117/* Default Message level */
117#define RTL8139_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ 118#define RTL8139_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
@@ -130,9 +131,9 @@
130# define assert(expr) do {} while (0) 131# define assert(expr) do {} while (0)
131#else 132#else
132# define assert(expr) \ 133# define assert(expr) \
133 if(unlikely(!(expr))) { \ 134 if (unlikely(!(expr))) { \
134 pr_err("Assertion failed! %s,%s,%s,line=%d\n", \ 135 pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
135 #expr, __FILE__, __func__, __LINE__); \ 136 #expr, __FILE__, __func__, __LINE__); \
136 } 137 }
137#endif 138#endif
138 139
@@ -231,7 +232,7 @@ static const struct {
231}; 232};
232 233
233 234
234static struct pci_device_id rtl8139_pci_tbl[] = { 235static DEFINE_PCI_DEVICE_TABLE(rtl8139_pci_tbl) = {
235 {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, 236 {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
236 {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, 237 {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
237 {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, 238 {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
@@ -957,7 +958,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
957 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && 958 pdev->device == PCI_DEVICE_ID_REALTEK_8139 &&
958 pdev->subsystem_vendor == PCI_VENDOR_ID_ATHEROS && 959 pdev->subsystem_vendor == PCI_VENDOR_ID_ATHEROS &&
959 pdev->subsystem_device == PCI_DEVICE_ID_REALTEK_8139) { 960 pdev->subsystem_device == PCI_DEVICE_ID_REALTEK_8139) {
960 pr_info("8139too: OQO Model 2 detected. Forcing PIO\n"); 961 pr_info("OQO Model 2 detected. Forcing PIO\n");
961 use_io = 1; 962 use_io = 1;
962 } 963 }
963 964
@@ -1010,21 +1011,19 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
1010 tp->mii.reg_num_mask = 0x1f; 1011 tp->mii.reg_num_mask = 0x1f;
1011 1012
1012 /* dev is fully set up and ready to use now */ 1013 /* dev is fully set up and ready to use now */
1013 pr_debug("about to register device named %s (%p)...\n", dev->name, dev); 1014 pr_debug("about to register device named %s (%p)...\n",
1015 dev->name, dev);
1014 i = register_netdev (dev); 1016 i = register_netdev (dev);
1015 if (i) goto err_out; 1017 if (i) goto err_out;
1016 1018
1017 pci_set_drvdata (pdev, dev); 1019 pci_set_drvdata (pdev, dev);
1018 1020
1019 pr_info("%s: %s at 0x%lx, %pM, IRQ %d\n", 1021 netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n",
1020 dev->name, 1022 board_info[ent->driver_data].name,
1021 board_info[ent->driver_data].name, 1023 dev->base_addr, dev->dev_addr, dev->irq);
1022 dev->base_addr,
1023 dev->dev_addr,
1024 dev->irq);
1025 1024
1026 pr_debug("%s: Identified 8139 chip type '%s'\n", 1025 netdev_dbg(dev, "Identified 8139 chip type '%s'\n",
1027 dev->name, rtl_chip_info[tp->chipset].name); 1026 rtl_chip_info[tp->chipset].name);
1028 1027
1029 /* Find the connected MII xcvrs. 1028 /* Find the connected MII xcvrs.
1030 Doing this in open() would allow detecting external xcvrs later, but 1029 Doing this in open() would allow detecting external xcvrs later, but
@@ -1037,13 +1036,12 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
1037 if (mii_status != 0xffff && mii_status != 0x0000) { 1036 if (mii_status != 0xffff && mii_status != 0x0000) {
1038 u16 advertising = mdio_read(dev, phy, 4); 1037 u16 advertising = mdio_read(dev, phy, 4);
1039 tp->phys[phy_idx++] = phy; 1038 tp->phys[phy_idx++] = phy;
1040 pr_info("%s: MII transceiver %d status 0x%4.4x advertising %4.4x.\n", 1039 netdev_info(dev, "MII transceiver %d status 0x%04x advertising %04x\n",
1041 dev->name, phy, mii_status, advertising); 1040 phy, mii_status, advertising);
1042 } 1041 }
1043 } 1042 }
1044 if (phy_idx == 0) { 1043 if (phy_idx == 0) {
1045 pr_info("%s: No MII transceivers found! Assuming SYM transceiver.\n", 1044 netdev_info(dev, "No MII transceivers found! Assuming SYM transceiver\n");
1046 dev->name);
1047 tp->phys[0] = 32; 1045 tp->phys[0] = 32;
1048 } 1046 }
1049 } else 1047 } else
@@ -1062,15 +1060,15 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
1062 if (board_idx < MAX_UNITS && full_duplex[board_idx] > 0) 1060 if (board_idx < MAX_UNITS && full_duplex[board_idx] > 0)
1063 tp->mii.full_duplex = full_duplex[board_idx]; 1061 tp->mii.full_duplex = full_duplex[board_idx];
1064 if (tp->mii.full_duplex) { 1062 if (tp->mii.full_duplex) {
1065 pr_info("%s: Media type forced to Full Duplex.\n", dev->name); 1063 netdev_info(dev, "Media type forced to Full Duplex\n");
1066 /* Changing the MII-advertised media because might prevent 1064 /* Changing the MII-advertised media because might prevent
1067 re-connection. */ 1065 re-connection. */
1068 tp->mii.force_media = 1; 1066 tp->mii.force_media = 1;
1069 } 1067 }
1070 if (tp->default_port) { 1068 if (tp->default_port) {
1071 pr_info(" Forcing %dMbps %s-duplex operation.\n", 1069 netdev_info(dev, " Forcing %dMbps %s-duplex operation\n",
1072 (option & 0x20 ? 100 : 10), 1070 (option & 0x20 ? 100 : 10),
1073 (option & 0x10 ? "full" : "half")); 1071 (option & 0x10 ? "full" : "half"));
1074 mdio_write(dev, tp->phys[0], 0, 1072 mdio_write(dev, tp->phys[0], 0,
1075 ((option & 0x20) ? 0x2000 : 0) | /* 100Mbps? */ 1073 ((option & 0x20) ? 0x2000 : 0) | /* 100Mbps? */
1076 ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */ 1074 ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
@@ -1330,12 +1328,12 @@ static int rtl8139_open (struct net_device *dev)
1330 rtl8139_hw_start (dev); 1328 rtl8139_hw_start (dev);
1331 netif_start_queue (dev); 1329 netif_start_queue (dev);
1332 1330
1333 if (netif_msg_ifup(tp)) 1331 netif_dbg(tp, ifup, dev,
1334 pr_debug("%s: rtl8139_open() ioaddr %#llx IRQ %d" 1332 "%s() ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n",
1335 " GP Pins %2.2x %s-duplex.\n", dev->name, 1333 __func__,
1336 (unsigned long long)pci_resource_start (tp->pci_dev, 1), 1334 (unsigned long long)pci_resource_start (tp->pci_dev, 1),
1337 dev->irq, RTL_R8 (MediaStatus), 1335 dev->irq, RTL_R8 (MediaStatus),
1338 tp->mii.full_duplex ? "full" : "half"); 1336 tp->mii.full_duplex ? "full" : "half");
1339 1337
1340 rtl8139_start_thread(tp); 1338 rtl8139_start_thread(tp);
1341 1339
@@ -1393,7 +1391,7 @@ static void rtl8139_hw_start (struct net_device *dev)
1393 RTL_W8 (Config3, RTL_R8 (Config3) & ~Cfg3_Magic); 1391 RTL_W8 (Config3, RTL_R8 (Config3) & ~Cfg3_Magic);
1394 } 1392 }
1395 1393
1396 pr_debug("init buffer addresses\n"); 1394 netdev_dbg(dev, "init buffer addresses\n");
1397 1395
1398 /* Lock Config[01234] and BMCR register writes */ 1396 /* Lock Config[01234] and BMCR register writes */
1399 RTL_W8 (Cfg9346, Cfg9346_Lock); 1397 RTL_W8 (Cfg9346, Cfg9346_Lock);
@@ -1555,14 +1553,11 @@ static inline void rtl8139_thread_iter (struct net_device *dev,
1555 tp->mii.full_duplex = duplex; 1553 tp->mii.full_duplex = duplex;
1556 1554
1557 if (mii_lpa) { 1555 if (mii_lpa) {
1558 pr_info("%s: Setting %s-duplex based on MII #%d link" 1556 netdev_info(dev, "Setting %s-duplex based on MII #%d link partner ability of %04x\n",
1559 " partner ability of %4.4x.\n", 1557 tp->mii.full_duplex ? "full" : "half",
1560 dev->name, 1558 tp->phys[0], mii_lpa);
1561 tp->mii.full_duplex ? "full" : "half",
1562 tp->phys[0], mii_lpa);
1563 } else { 1559 } else {
1564 pr_info("%s: media is unconnected, link down, or incompatible connection\n", 1560 netdev_info(dev, "media is unconnected, link down, or incompatible connection\n");
1565 dev->name);
1566 } 1561 }
1567#if 0 1562#if 0
1568 RTL_W8 (Cfg9346, Cfg9346_Unlock); 1563 RTL_W8 (Cfg9346, Cfg9346_Unlock);
@@ -1576,13 +1571,12 @@ static inline void rtl8139_thread_iter (struct net_device *dev,
1576 1571
1577 rtl8139_tune_twister (dev, tp); 1572 rtl8139_tune_twister (dev, tp);
1578 1573
1579 pr_debug("%s: Media selection tick, Link partner %4.4x.\n", 1574 netdev_dbg(dev, "Media selection tick, Link partner %04x\n",
1580 dev->name, RTL_R16 (NWayLPAR)); 1575 RTL_R16(NWayLPAR));
1581 pr_debug("%s: Other registers are IntMask %4.4x IntStatus %4.4x\n", 1576 netdev_dbg(dev, "Other registers are IntMask %04x IntStatus %04x\n",
1582 dev->name, RTL_R16 (IntrMask), RTL_R16 (IntrStatus)); 1577 RTL_R16(IntrMask), RTL_R16(IntrStatus));
1583 pr_debug("%s: Chip config %2.2x %2.2x.\n", 1578 netdev_dbg(dev, "Chip config %02x %02x\n",
1584 dev->name, RTL_R8 (Config0), 1579 RTL_R8(Config0), RTL_R8(Config1));
1585 RTL_R8 (Config1));
1586} 1580}
1587 1581
1588static void rtl8139_thread (struct work_struct *work) 1582static void rtl8139_thread (struct work_struct *work)
@@ -1640,17 +1634,17 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
1640 int i; 1634 int i;
1641 u8 tmp8; 1635 u8 tmp8;
1642 1636
1643 pr_debug("%s: Transmit timeout, status %2.2x %4.4x %4.4x media %2.2x.\n", 1637 netdev_dbg(dev, "Transmit timeout, status %02x %04x %04x media %02x\n",
1644 dev->name, RTL_R8 (ChipCmd), 1638 RTL_R8(ChipCmd), RTL_R16(IntrStatus),
1645 RTL_R16(IntrStatus), RTL_R16(IntrMask), RTL_R8(MediaStatus)); 1639 RTL_R16(IntrMask), RTL_R8(MediaStatus));
1646 /* Emit info to figure out what went wrong. */ 1640 /* Emit info to figure out what went wrong. */
1647 pr_debug("%s: Tx queue start entry %ld dirty entry %ld.\n", 1641 netdev_dbg(dev, "Tx queue start entry %ld dirty entry %ld\n",
1648 dev->name, tp->cur_tx, tp->dirty_tx); 1642 tp->cur_tx, tp->dirty_tx);
1649 for (i = 0; i < NUM_TX_DESC; i++) 1643 for (i = 0; i < NUM_TX_DESC; i++)
1650 pr_debug("%s: Tx descriptor %d is %8.8lx.%s\n", 1644 netdev_dbg(dev, "Tx descriptor %d is %08lx%s\n",
1651 dev->name, i, RTL_R32 (TxStatus0 + (i * 4)), 1645 i, RTL_R32(TxStatus0 + (i * 4)),
1652 i == tp->dirty_tx % NUM_TX_DESC ? 1646 i == tp->dirty_tx % NUM_TX_DESC ?
1653 " (queue head)" : ""); 1647 " (queue head)" : "");
1654 1648
1655 tp->xstats.tx_timeouts++; 1649 tp->xstats.tx_timeouts++;
1656 1650
@@ -1729,9 +1723,8 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
1729 netif_stop_queue (dev); 1723 netif_stop_queue (dev);
1730 spin_unlock_irqrestore(&tp->lock, flags); 1724 spin_unlock_irqrestore(&tp->lock, flags);
1731 1725
1732 if (netif_msg_tx_queued(tp)) 1726 netif_dbg(tp, tx_queued, dev, "Queued Tx packet size %u to slot %d\n",
1733 pr_debug("%s: Queued Tx packet size %u to slot %d.\n", 1727 len, entry);
1734 dev->name, len, entry);
1735 1728
1736 return NETDEV_TX_OK; 1729 return NETDEV_TX_OK;
1737} 1730}
@@ -1760,9 +1753,8 @@ static void rtl8139_tx_interrupt (struct net_device *dev,
1760 /* Note: TxCarrierLost is always asserted at 100mbps. */ 1753 /* Note: TxCarrierLost is always asserted at 100mbps. */
1761 if (txstatus & (TxOutOfWindow | TxAborted)) { 1754 if (txstatus & (TxOutOfWindow | TxAborted)) {
1762 /* There was an major error, log it. */ 1755 /* There was an major error, log it. */
1763 if (netif_msg_tx_err(tp)) 1756 netif_dbg(tp, tx_err, dev, "Transmit error, Tx status %08x\n",
1764 pr_debug("%s: Transmit error, Tx status %8.8x.\n", 1757 txstatus);
1765 dev->name, txstatus);
1766 dev->stats.tx_errors++; 1758 dev->stats.tx_errors++;
1767 if (txstatus & TxAborted) { 1759 if (txstatus & TxAborted) {
1768 dev->stats.tx_aborted_errors++; 1760 dev->stats.tx_aborted_errors++;
@@ -1792,8 +1784,8 @@ static void rtl8139_tx_interrupt (struct net_device *dev,
1792 1784
1793#ifndef RTL8139_NDEBUG 1785#ifndef RTL8139_NDEBUG
1794 if (tp->cur_tx - dirty_tx > NUM_TX_DESC) { 1786 if (tp->cur_tx - dirty_tx > NUM_TX_DESC) {
1795 pr_err("%s: Out-of-sync dirty pointer, %ld vs. %ld.\n", 1787 netdev_err(dev, "Out-of-sync dirty pointer, %ld vs. %ld\n",
1796 dev->name, dirty_tx, tp->cur_tx); 1788 dirty_tx, tp->cur_tx);
1797 dirty_tx += NUM_TX_DESC; 1789 dirty_tx += NUM_TX_DESC;
1798 } 1790 }
1799#endif /* RTL8139_NDEBUG */ 1791#endif /* RTL8139_NDEBUG */
@@ -1816,14 +1808,13 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
1816 int tmp_work; 1808 int tmp_work;
1817#endif 1809#endif
1818 1810
1819 if (netif_msg_rx_err (tp)) 1811 netif_dbg(tp, rx_err, dev, "Ethernet frame had errors, status %08x\n",
1820 pr_debug("%s: Ethernet frame had errors, status %8.8x.\n", 1812 rx_status);
1821 dev->name, rx_status);
1822 dev->stats.rx_errors++; 1813 dev->stats.rx_errors++;
1823 if (!(rx_status & RxStatusOK)) { 1814 if (!(rx_status & RxStatusOK)) {
1824 if (rx_status & RxTooLong) { 1815 if (rx_status & RxTooLong) {
1825 pr_debug("%s: Oversized Ethernet frame, status %4.4x!\n", 1816 netdev_dbg(dev, "Oversized Ethernet frame, status %04x!\n",
1826 dev->name, rx_status); 1817 rx_status);
1827 /* A.C.: The chip hangs here. */ 1818 /* A.C.: The chip hangs here. */
1828 } 1819 }
1829 if (rx_status & (RxBadSymbol | RxBadAlign)) 1820 if (rx_status & (RxBadSymbol | RxBadAlign))
@@ -1855,7 +1846,7 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
1855 break; 1846 break;
1856 } 1847 }
1857 if (tmp_work <= 0) 1848 if (tmp_work <= 0)
1858 pr_warning(PFX "rx stop wait too long\n"); 1849 netdev_warn(dev, "rx stop wait too long\n");
1859 /* restart receive */ 1850 /* restart receive */
1860 tmp_work = 200; 1851 tmp_work = 200;
1861 while (--tmp_work > 0) { 1852 while (--tmp_work > 0) {
@@ -1866,7 +1857,7 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
1866 break; 1857 break;
1867 } 1858 }
1868 if (tmp_work <= 0) 1859 if (tmp_work <= 0)
1869 pr_warning(PFX "tx/rx enable wait too long\n"); 1860 netdev_warn(dev, "tx/rx enable wait too long\n");
1870 1861
1871 /* and reinitialize all rx related registers */ 1862 /* and reinitialize all rx related registers */
1872 RTL_W8_F (Cfg9346, Cfg9346_Unlock); 1863 RTL_W8_F (Cfg9346, Cfg9346_Unlock);
@@ -1877,7 +1868,7 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
1877 RTL_W32 (RxConfig, tp->rx_config); 1868 RTL_W32 (RxConfig, tp->rx_config);
1878 tp->cur_rx = 0; 1869 tp->cur_rx = 0;
1879 1870
1880 pr_debug("init buffer addresses\n"); 1871 netdev_dbg(dev, "init buffer addresses\n");
1881 1872
1882 /* Lock Config[01234] and BMCR register writes */ 1873 /* Lock Config[01234] and BMCR register writes */
1883 RTL_W8 (Cfg9346, Cfg9346_Lock); 1874 RTL_W8 (Cfg9346, Cfg9346_Lock);
@@ -1931,10 +1922,9 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
1931 unsigned int cur_rx = tp->cur_rx; 1922 unsigned int cur_rx = tp->cur_rx;
1932 unsigned int rx_size = 0; 1923 unsigned int rx_size = 0;
1933 1924
1934 pr_debug("%s: In rtl8139_rx(), current %4.4x BufAddr %4.4x," 1925 netdev_dbg(dev, "In %s(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
1935 " free to %4.4x, Cmd %2.2x.\n", dev->name, (u16)cur_rx, 1926 __func__, (u16)cur_rx,
1936 RTL_R16 (RxBufAddr), 1927 RTL_R16(RxBufAddr), RTL_R16(RxBufPtr), RTL_R8(ChipCmd));
1937 RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
1938 1928
1939 while (netif_running(dev) && received < budget && 1929 while (netif_running(dev) && received < budget &&
1940 (RTL_R8 (ChipCmd) & RxBufEmpty) == 0) { 1930 (RTL_R8 (ChipCmd) & RxBufEmpty) == 0) {
@@ -1950,19 +1940,12 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
1950 rx_size = rx_status >> 16; 1940 rx_size = rx_status >> 16;
1951 pkt_size = rx_size - 4; 1941 pkt_size = rx_size - 4;
1952 1942
1953 if (netif_msg_rx_status(tp)) 1943 netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n",
1954 pr_debug("%s: rtl8139_rx() status %4.4x, size %4.4x," 1944 __func__, rx_status, rx_size, cur_rx);
1955 " cur %4.4x.\n", dev->name, rx_status,
1956 rx_size, cur_rx);
1957#if RTL8139_DEBUG > 2 1945#if RTL8139_DEBUG > 2
1958 { 1946 print_dump_hex(KERN_DEBUG, "Frame contents: ",
1959 int i; 1947 DUMP_PREFIX_OFFSET, 16, 1,
1960 pr_debug("%s: Frame contents ", dev->name); 1948 &rx_ring[ring_offset], 70, true);
1961 for (i = 0; i < 70; i++)
1962 pr_cont(" %2.2x",
1963 rx_ring[ring_offset + i]);
1964 pr_cont(".\n");
1965 }
1966#endif 1949#endif
1967 1950
1968 /* Packet copy from FIFO still in progress. 1951 /* Packet copy from FIFO still in progress.
@@ -1973,14 +1956,11 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
1973 if (!tp->fifo_copy_timeout) 1956 if (!tp->fifo_copy_timeout)
1974 tp->fifo_copy_timeout = jiffies + 2; 1957 tp->fifo_copy_timeout = jiffies + 2;
1975 else if (time_after(jiffies, tp->fifo_copy_timeout)) { 1958 else if (time_after(jiffies, tp->fifo_copy_timeout)) {
1976 pr_debug("%s: hung FIFO. Reset.", dev->name); 1959 netdev_dbg(dev, "hung FIFO. Reset\n");
1977 rx_size = 0; 1960 rx_size = 0;
1978 goto no_early_rx; 1961 goto no_early_rx;
1979 } 1962 }
1980 if (netif_msg_intr(tp)) { 1963 netif_dbg(tp, intr, dev, "fifo copy in progress\n");
1981 pr_debug("%s: fifo copy in progress.",
1982 dev->name);
1983 }
1984 tp->xstats.early_rx++; 1964 tp->xstats.early_rx++;
1985 break; 1965 break;
1986 } 1966 }
@@ -2021,8 +2001,7 @@ no_early_rx:
2021 netif_receive_skb (skb); 2001 netif_receive_skb (skb);
2022 } else { 2002 } else {
2023 if (net_ratelimit()) 2003 if (net_ratelimit())
2024 pr_warning("%s: Memory squeeze, dropping packet.\n", 2004 netdev_warn(dev, "Memory squeeze, dropping packet\n");
2025 dev->name);
2026 dev->stats.rx_dropped++; 2005 dev->stats.rx_dropped++;
2027 } 2006 }
2028 received++; 2007 received++;
@@ -2036,10 +2015,9 @@ no_early_rx:
2036 if (unlikely(!received || rx_size == 0xfff0)) 2015 if (unlikely(!received || rx_size == 0xfff0))
2037 rtl8139_isr_ack(tp); 2016 rtl8139_isr_ack(tp);
2038 2017
2039 pr_debug("%s: Done rtl8139_rx(), current %4.4x BufAddr %4.4x," 2018 netdev_dbg(dev, "Done %s(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
2040 " free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx, 2019 __func__, cur_rx,
2041 RTL_R16 (RxBufAddr), 2020 RTL_R16(RxBufAddr), RTL_R16(RxBufPtr), RTL_R8(ChipCmd));
2042 RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
2043 2021
2044 tp->cur_rx = cur_rx; 2022 tp->cur_rx = cur_rx;
2045 2023
@@ -2060,8 +2038,7 @@ static void rtl8139_weird_interrupt (struct net_device *dev,
2060 void __iomem *ioaddr, 2038 void __iomem *ioaddr,
2061 int status, int link_changed) 2039 int status, int link_changed)
2062{ 2040{
2063 pr_debug("%s: Abnormal interrupt, status %8.8x.\n", 2041 netdev_dbg(dev, "Abnormal interrupt, status %08x\n", status);
2064 dev->name, status);
2065 2042
2066 assert (dev != NULL); 2043 assert (dev != NULL);
2067 assert (tp != NULL); 2044 assert (tp != NULL);
@@ -2089,8 +2066,7 @@ static void rtl8139_weird_interrupt (struct net_device *dev,
2089 pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status); 2066 pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status);
2090 pci_write_config_word (tp->pci_dev, PCI_STATUS, pci_cmd_status); 2067 pci_write_config_word (tp->pci_dev, PCI_STATUS, pci_cmd_status);
2091 2068
2092 pr_err("%s: PCI Bus error %4.4x.\n", 2069 netdev_err(dev, "PCI Bus error %04x\n", pci_cmd_status);
2093 dev->name, pci_cmd_status);
2094 } 2070 }
2095} 2071}
2096 2072
@@ -2183,8 +2159,8 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance)
2183 out: 2159 out:
2184 spin_unlock (&tp->lock); 2160 spin_unlock (&tp->lock);
2185 2161
2186 pr_debug("%s: exiting interrupt, intr_status=%#4.4x.\n", 2162 netdev_dbg(dev, "exiting interrupt, intr_status=%#4.4x\n",
2187 dev->name, RTL_R16 (IntrStatus)); 2163 RTL_R16(IntrStatus));
2188 return IRQ_RETVAL(handled); 2164 return IRQ_RETVAL(handled);
2189} 2165}
2190 2166
@@ -2233,9 +2209,8 @@ static int rtl8139_close (struct net_device *dev)
2233 netif_stop_queue(dev); 2209 netif_stop_queue(dev);
2234 napi_disable(&tp->napi); 2210 napi_disable(&tp->napi);
2235 2211
2236 if (netif_msg_ifdown(tp)) 2212 netif_dbg(tp, ifdown, dev, "Shutting down ethercard, status was 0x%04x\n",
2237 pr_debug("%s: Shutting down ethercard, status was 0x%4.4x.\n", 2213 RTL_R16(IntrStatus));
2238 dev->name, RTL_R16 (IntrStatus));
2239 2214
2240 spin_lock_irqsave (&tp->lock, flags); 2215 spin_lock_irqsave (&tp->lock, flags);
2241 2216
@@ -2509,11 +2484,11 @@ static void __set_rx_mode (struct net_device *dev)
2509 struct rtl8139_private *tp = netdev_priv(dev); 2484 struct rtl8139_private *tp = netdev_priv(dev);
2510 void __iomem *ioaddr = tp->mmio_addr; 2485 void __iomem *ioaddr = tp->mmio_addr;
2511 u32 mc_filter[2]; /* Multicast hash filter */ 2486 u32 mc_filter[2]; /* Multicast hash filter */
2512 int i, rx_mode; 2487 int rx_mode;
2513 u32 tmp; 2488 u32 tmp;
2514 2489
2515 pr_debug("%s: rtl8139_set_rx_mode(%4.4x) done -- Rx config %8.8lx.\n", 2490 netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08lx\n",
2516 dev->name, dev->flags, RTL_R32 (RxConfig)); 2491 dev->flags, RTL_R32(RxConfig));
2517 2492
2518 /* Note: do not reorder, GCC is clever about common statements. */ 2493 /* Note: do not reorder, GCC is clever about common statements. */
2519 if (dev->flags & IFF_PROMISC) { 2494 if (dev->flags & IFF_PROMISC) {
@@ -2521,7 +2496,7 @@ static void __set_rx_mode (struct net_device *dev)
2521 AcceptBroadcast | AcceptMulticast | AcceptMyPhys | 2496 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
2522 AcceptAllPhys; 2497 AcceptAllPhys;
2523 mc_filter[1] = mc_filter[0] = 0xffffffff; 2498 mc_filter[1] = mc_filter[0] = 0xffffffff;
2524 } else if ((dev->mc_count > multicast_filter_limit) || 2499 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2525 (dev->flags & IFF_ALLMULTI)) { 2500 (dev->flags & IFF_ALLMULTI)) {
2526 /* Too many to filter perfectly -- accept all multicasts. */ 2501 /* Too many to filter perfectly -- accept all multicasts. */
2527 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 2502 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
@@ -2530,8 +2505,7 @@ static void __set_rx_mode (struct net_device *dev)
2530 struct dev_mc_list *mclist; 2505 struct dev_mc_list *mclist;
2531 rx_mode = AcceptBroadcast | AcceptMyPhys; 2506 rx_mode = AcceptBroadcast | AcceptMyPhys;
2532 mc_filter[1] = mc_filter[0] = 0; 2507 mc_filter[1] = mc_filter[0] = 0;
2533 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 2508 netdev_for_each_mc_addr(mclist, dev) {
2534 i++, mclist = mclist->next) {
2535 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 2509 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
2536 2510
2537 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 2511 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 1663bc9e45de..f94d17d78bb0 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -1505,7 +1505,7 @@ static void set_multicast_list(struct net_device *dev)
1505 int config = 0, cnt; 1505 int config = 0, cnt;
1506 1506
1507 DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n", 1507 DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
1508 dev->name, dev->mc_count, 1508 dev->name, netdev_mc_count(dev),
1509 dev->flags & IFF_PROMISC ? "ON" : "OFF", 1509 dev->flags & IFF_PROMISC ? "ON" : "OFF",
1510 dev->flags & IFF_ALLMULTI ? "ON" : "OFF")); 1510 dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
1511 1511
@@ -1533,7 +1533,7 @@ static void set_multicast_list(struct net_device *dev)
1533 i596_add_cmd(dev, &lp->cf_cmd.cmd); 1533 i596_add_cmd(dev, &lp->cf_cmd.cmd);
1534 } 1534 }
1535 1535
1536 cnt = dev->mc_count; 1536 cnt = netdev_mc_count(dev);
1537 if (cnt > MAX_MC_CNT) 1537 if (cnt > MAX_MC_CNT)
1538 { 1538 {
1539 cnt = MAX_MC_CNT; 1539 cnt = MAX_MC_CNT;
@@ -1541,7 +1541,7 @@ static void set_multicast_list(struct net_device *dev)
1541 dev->name, cnt); 1541 dev->name, cnt);
1542 } 1542 }
1543 1543
1544 if (dev->mc_count > 0) { 1544 if (!netdev_mc_empty(dev)) {
1545 struct dev_mc_list *dmi; 1545 struct dev_mc_list *dmi;
1546 unsigned char *cp; 1546 unsigned char *cp;
1547 struct mc_cmd *cmd; 1547 struct mc_cmd *cmd;
@@ -1550,13 +1550,16 @@ static void set_multicast_list(struct net_device *dev)
1550 return; 1550 return;
1551 cmd = &lp->mc_cmd; 1551 cmd = &lp->mc_cmd;
1552 cmd->cmd.command = CmdMulticastList; 1552 cmd->cmd.command = CmdMulticastList;
1553 cmd->mc_cnt = dev->mc_count * 6; 1553 cmd->mc_cnt = cnt * ETH_ALEN;
1554 cp = cmd->mc_addrs; 1554 cp = cmd->mc_addrs;
1555 for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) { 1555 netdev_for_each_mc_addr(dmi, dev) {
1556 memcpy(cp, dmi->dmi_addr, 6); 1556 if (!cnt--)
1557 break;
1558 memcpy(cp, dmi->dmi_addr, ETH_ALEN);
1557 if (i596_debug > 1) 1559 if (i596_debug > 1)
1558 DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n", 1560 DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n",
1559 dev->name, cp)); 1561 dev->name, cp));
1562 cp += ETH_ALEN;
1560 } 1563 }
1561 i596_add_cmd(dev, &cmd->cmd); 1564 i596_add_cmd(dev, &cmd->cmd);
1562 } 1565 }
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 18300625b05b..7029cd50c458 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -90,6 +90,18 @@ config MACVLAN
90 To compile this driver as a module, choose M here: the module 90 To compile this driver as a module, choose M here: the module
91 will be called macvlan. 91 will be called macvlan.
92 92
93config MACVTAP
94 tristate "MAC-VLAN based tap driver (EXPERIMENTAL)"
95 depends on MACVLAN
96 help
97 This adds a specialized tap character device driver that is based
98 on the MAC-VLAN network interface, called macvtap. A macvtap device
99 can be added in the same way as a macvlan device, using 'type
100 macvlan', and then be accessed through the tap user space interface.
101
102 To compile this driver as a module, choose M here: the module
103 will be called macvtap.
104
93config EQUALIZER 105config EQUALIZER
94 tristate "EQL (serial line load balancing) support" 106 tristate "EQL (serial line load balancing) support"
95 ---help--- 107 ---help---
@@ -868,8 +880,8 @@ config BFIN_RX_DESC_NUM
868 Set the number of buffer packets used in driver. 880 Set the number of buffer packets used in driver.
869 881
870config BFIN_MAC_RMII 882config BFIN_MAC_RMII
871 bool "RMII PHY Interface (EXPERIMENTAL)" 883 bool "RMII PHY Interface"
872 depends on BFIN_MAC && EXPERIMENTAL 884 depends on BFIN_MAC
873 default y if BFIN527_EZKIT 885 default y if BFIN527_EZKIT
874 default n if BFIN537_STAMP 886 default n if BFIN537_STAMP
875 help 887 help
@@ -983,6 +995,14 @@ config ETHOC
983 help 995 help
984 Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC. 996 Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
985 997
998config GRETH
999 tristate "Aeroflex Gaisler GRETH Ethernet MAC support"
1000 depends on SPARC
1001 select PHYLIB
1002 select CRC32
1003 help
1004 Say Y here if you want to use the Aeroflex Gaisler GRETH Ethernet MAC.
1005
986config SMC911X 1006config SMC911X
987 tristate "SMSC LAN911[5678] support" 1007 tristate "SMSC LAN911[5678] support"
988 select CRC32 1008 select CRC32
@@ -1368,6 +1388,17 @@ config AC3200
1368 To compile this driver as a module, choose M here. The module 1388 To compile this driver as a module, choose M here. The module
1369 will be called ac3200. 1389 will be called ac3200.
1370 1390
1391config KSZ884X_PCI
1392 tristate "Micrel KSZ8841/2 PCI"
1393 depends on NET_PCI && PCI
1394 select MII
1395 select CRC32
1396 help
1397 This PCI driver is for Micrel KSZ8841/KSZ8842 PCI Ethernet chip.
1398
1399 To compile this driver as a module, choose M here. The module
1400 will be called ksz884x.
1401
1371config APRICOT 1402config APRICOT
1372 tristate "Apricot Xen-II on board Ethernet" 1403 tristate "Apricot Xen-II on board Ethernet"
1373 depends on NET_PCI && ISA 1404 depends on NET_PCI && ISA
@@ -1883,7 +1914,8 @@ config 68360_ENET
1883 1914
1884config FEC 1915config FEC
1885 bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" 1916 bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
1886 depends on M523x || M527x || M5272 || M528x || M520x || M532x || MACH_MX27 || ARCH_MX35 || ARCH_MX25 1917 depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
1918 MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5
1887 help 1919 help
1888 Say Y here if you want to use the built-in 10/100 Fast ethernet 1920 Say Y here if you want to use the built-in 10/100 Fast ethernet
1889 controller on some Motorola ColdFire and Freescale i.MX processors. 1921 controller on some Motorola ColdFire and Freescale i.MX processors.
@@ -1939,6 +1971,7 @@ config ATL2
1939config XILINX_EMACLITE 1971config XILINX_EMACLITE
1940 tristate "Xilinx 10/100 Ethernet Lite support" 1972 tristate "Xilinx 10/100 Ethernet Lite support"
1941 depends on PPC32 || MICROBLAZE 1973 depends on PPC32 || MICROBLAZE
1974 select PHYLIB
1942 help 1975 help
1943 This driver supports the 10/100 Ethernet Lite from Xilinx. 1976 This driver supports the 10/100 Ethernet Lite from Xilinx.
1944 1977
@@ -2356,20 +2389,6 @@ config GELIC_WIRELESS
2356 the driver automatically distinguishes the models, you can 2389 the driver automatically distinguishes the models, you can
2357 safely enable this option even if you have a wireless-less model. 2390 safely enable this option even if you have a wireless-less model.
2358 2391
2359config GELIC_WIRELESS_OLD_PSK_INTERFACE
2360 bool "PS3 Wireless private PSK interface (OBSOLETE)"
2361 depends on GELIC_WIRELESS
2362 select WEXT_PRIV
2363 help
2364 This option retains the obsolete private interface to pass
2365 the PSK from user space programs to the driver. The PSK
2366 stands for 'Pre Shared Key' and is used for WPA[2]-PSK
2367 (WPA-Personal) environment.
2368 If WPA[2]-PSK is used and you need to use old programs that
2369 support only this old interface, say Y. Otherwise N.
2370
2371 If unsure, say N.
2372
2373config FSL_PQ_MDIO 2392config FSL_PQ_MDIO
2374 tristate "Freescale PQ MDIO" 2393 tristate "Freescale PQ MDIO"
2375 depends on FSL_SOC 2394 depends on FSL_SOC
@@ -2618,6 +2637,28 @@ config IXGBE_DCB
2618 2637
2619 If unsure, say N. 2638 If unsure, say N.
2620 2639
2640config IXGBEVF
2641 tristate "Intel(R) 82599 Virtual Function Ethernet support"
2642 depends on PCI_MSI
2643 ---help---
2644 This driver supports Intel(R) 82599 virtual functions. For more
2645 information on how to identify your adapter, go to the Adapter &
2646 Driver ID Guide at:
2647
2648 <http://support.intel.com/support/network/sb/CS-008441.htm>
2649
2650 For general information and support, go to the Intel support
2651 website at:
2652
2653 <http://support.intel.com>
2654
2655 More specific information on configuring the driver is in
2656 <file:Documentation/networking/ixgbevf.txt>.
2657
2658 To compile this driver as a module, choose M here. The module
2659 will be called ixgbevf. MSI-X interrupt support is required
2660 for this driver to work correctly.
2661
2621config IXGB 2662config IXGB
2622 tristate "Intel(R) PRO/10GbE support" 2663 tristate "Intel(R) PRO/10GbE support"
2623 depends on PCI 2664 depends on PCI
@@ -2756,6 +2797,13 @@ config BNX2X
2756 To compile this driver as a module, choose M here: the module 2797 To compile this driver as a module, choose M here: the module
2757 will be called bnx2x. This is recommended. 2798 will be called bnx2x. This is recommended.
2758 2799
2800config QLCNIC
2801 tristate "QLOGIC QLCNIC 1/10Gb Converged Ethernet NIC Support"
2802 depends on PCI
2803 help
2804 This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet
2805 devices.
2806
2759config QLGE 2807config QLGE
2760 tristate "QLogic QLGE 10Gb Ethernet Driver Support" 2808 tristate "QLogic QLGE 10Gb Ethernet Driver Support"
2761 depends on PCI 2809 depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index ad1346dd9da9..478886234c28 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/
14obj-$(CONFIG_IGB) += igb/ 14obj-$(CONFIG_IGB) += igb/
15obj-$(CONFIG_IGBVF) += igbvf/ 15obj-$(CONFIG_IGBVF) += igbvf/
16obj-$(CONFIG_IXGBE) += ixgbe/ 16obj-$(CONFIG_IXGBE) += ixgbe/
17obj-$(CONFIG_IXGBEVF) += ixgbevf/
17obj-$(CONFIG_IXGB) += ixgb/ 18obj-$(CONFIG_IXGB) += ixgb/
18obj-$(CONFIG_IP1000) += ipg.o 19obj-$(CONFIG_IP1000) += ipg.o
19obj-$(CONFIG_CHELSIO_T1) += chelsio/ 20obj-$(CONFIG_CHELSIO_T1) += chelsio/
@@ -95,6 +96,7 @@ obj-$(CONFIG_SKFP) += skfp/
95obj-$(CONFIG_KS8842) += ks8842.o 96obj-$(CONFIG_KS8842) += ks8842.o
96obj-$(CONFIG_KS8851) += ks8851.o 97obj-$(CONFIG_KS8851) += ks8851.o
97obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o 98obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
99obj-$(CONFIG_KSZ884X_PCI) += ksz884x.o
98obj-$(CONFIG_VIA_RHINE) += via-rhine.o 100obj-$(CONFIG_VIA_RHINE) += via-rhine.o
99obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o 101obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
100obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o 102obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
@@ -148,6 +150,7 @@ ll_temac-objs := ll_temac_main.o ll_temac_mdio.o
148obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o 150obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o
149obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o 151obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o
150obj-$(CONFIG_QLA3XXX) += qla3xxx.o 152obj-$(CONFIG_QLA3XXX) += qla3xxx.o
153obj-$(CONFIG_QLCNIC) += qlcnic/
151obj-$(CONFIG_QLGE) += qlge/ 154obj-$(CONFIG_QLGE) += qlge/
152 155
153obj-$(CONFIG_PPP) += ppp_generic.o 156obj-$(CONFIG_PPP) += ppp_generic.o
@@ -167,6 +170,7 @@ obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
167obj-$(CONFIG_DUMMY) += dummy.o 170obj-$(CONFIG_DUMMY) += dummy.o
168obj-$(CONFIG_IFB) += ifb.o 171obj-$(CONFIG_IFB) += ifb.o
169obj-$(CONFIG_MACVLAN) += macvlan.o 172obj-$(CONFIG_MACVLAN) += macvlan.o
173obj-$(CONFIG_MACVTAP) += macvtap.o
170obj-$(CONFIG_DE600) += de600.o 174obj-$(CONFIG_DE600) += de600.o
171obj-$(CONFIG_DE620) += de620.o 175obj-$(CONFIG_DE620) += de620.o
172obj-$(CONFIG_LANCE) += lance.o 176obj-$(CONFIG_LANCE) += lance.o
@@ -246,6 +250,7 @@ pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
246obj-$(CONFIG_MLX4_CORE) += mlx4/ 250obj-$(CONFIG_MLX4_CORE) += mlx4/
247obj-$(CONFIG_ENC28J60) += enc28j60.o 251obj-$(CONFIG_ENC28J60) += enc28j60.o
248obj-$(CONFIG_ETHOC) += ethoc.o 252obj-$(CONFIG_ETHOC) += ethoc.o
253obj-$(CONFIG_GRETH) += greth.o
249 254
250obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o 255obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
251 256
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index b7ec0368d7e8..bd4d829eca12 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -603,9 +603,8 @@ static void lance_load_multicast (struct net_device *dev)
603 struct lance_private *lp = netdev_priv(dev); 603 struct lance_private *lp = netdev_priv(dev);
604 volatile struct lance_init_block *ib = lp->init_block; 604 volatile struct lance_init_block *ib = lp->init_block;
605 volatile u16 *mcast_table = (u16 *)&ib->filter; 605 volatile u16 *mcast_table = (u16 *)&ib->filter;
606 struct dev_mc_list *dmi=dev->mc_list; 606 struct dev_mc_list *dmi;
607 char *addrs; 607 char *addrs;
608 int i;
609 u32 crc; 608 u32 crc;
610 609
611 /* set all multicast bits */ 610 /* set all multicast bits */
@@ -619,9 +618,8 @@ static void lance_load_multicast (struct net_device *dev)
619 ib->filter [1] = 0; 618 ib->filter [1] = 0;
620 619
621 /* Add addresses */ 620 /* Add addresses */
622 for (i = 0; i < dev->mc_count; i++){ 621 netdev_for_each_mc_addr(dmi, dev) {
623 addrs = dmi->dmi_addr; 622 addrs = dmi->dmi_addr;
624 dmi = dmi->next;
625 623
626 /* multicast address? */ 624 /* multicast address? */
627 if (!(*addrs & 1)) 625 if (!(*addrs & 1))
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index d82a9a994753..4ae750ef1e10 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -134,7 +134,7 @@
134#define PCI_DEVICE_ID_SGI_ACENIC 0x0009 134#define PCI_DEVICE_ID_SGI_ACENIC 0x0009
135#endif 135#endif
136 136
137static struct pci_device_id acenic_pci_tbl[] = { 137static DEFINE_PCI_DEVICE_TABLE(acenic_pci_tbl) = {
138 { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE, 138 { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
139 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, }, 139 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
140 { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER, 140 { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
@@ -2845,7 +2845,7 @@ static void ace_set_multicast_list(struct net_device *dev)
2845 * set the entire multicast list at a time and keeping track of 2845 * set the entire multicast list at a time and keeping track of
2846 * it here is going to be messy. 2846 * it here is going to be messy.
2847 */ 2847 */
2848 if ((dev->mc_count) && !(ap->mcast_all)) { 2848 if (!netdev_mc_empty(dev) && !ap->mcast_all) {
2849 cmd.evt = C_SET_MULTICAST_MODE; 2849 cmd.evt = C_SET_MULTICAST_MODE;
2850 cmd.code = C_C_MCAST_ENABLE; 2850 cmd.code = C_C_MCAST_ENABLE;
2851 cmd.idx = 0; 2851 cmd.idx = 0;
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 766aabfdfc75..b8a59d255b49 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -113,7 +113,7 @@ MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0
113module_param_array(dynamic_ipg, bool, NULL, 0); 113module_param_array(dynamic_ipg, bool, NULL, 0);
114MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable"); 114MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
115 115
116static struct pci_device_id amd8111e_pci_tbl[] = { 116static DEFINE_PCI_DEVICE_TABLE(amd8111e_pci_tbl) = {
117 117
118 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462, 118 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
@@ -1176,8 +1176,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
1176 /* Schedule a polling routine */ 1176 /* Schedule a polling routine */
1177 __napi_schedule(&lp->napi); 1177 __napi_schedule(&lp->napi);
1178 } else if (intren0 & RINTEN0) { 1178 } else if (intren0 & RINTEN0) {
1179 printk("************Driver bug! \ 1179 printk("************Driver bug! interrupt while in poll\n");
1180 interrupt while in poll\n");
1181 /* Fix by disable receive interrupts */ 1180 /* Fix by disable receive interrupts */
1182 writel(RINTEN0, mmio + INTEN0); 1181 writel(RINTEN0, mmio + INTEN0);
1183 } 1182 }
@@ -1378,28 +1377,28 @@ list to the device.
1378*/ 1377*/
1379static void amd8111e_set_multicast_list(struct net_device *dev) 1378static void amd8111e_set_multicast_list(struct net_device *dev)
1380{ 1379{
1381 struct dev_mc_list* mc_ptr; 1380 struct dev_mc_list *mc_ptr;
1382 struct amd8111e_priv *lp = netdev_priv(dev); 1381 struct amd8111e_priv *lp = netdev_priv(dev);
1383 u32 mc_filter[2] ; 1382 u32 mc_filter[2] ;
1384 int i,bit_num; 1383 int bit_num;
1384
1385 if(dev->flags & IFF_PROMISC){ 1385 if(dev->flags & IFF_PROMISC){
1386 writel( VAL2 | PROM, lp->mmio + CMD2); 1386 writel( VAL2 | PROM, lp->mmio + CMD2);
1387 return; 1387 return;
1388 } 1388 }
1389 else 1389 else
1390 writel( PROM, lp->mmio + CMD2); 1390 writel( PROM, lp->mmio + CMD2);
1391 if(dev->flags & IFF_ALLMULTI || dev->mc_count > MAX_FILTER_SIZE){ 1391 if (dev->flags & IFF_ALLMULTI ||
1392 netdev_mc_count(dev) > MAX_FILTER_SIZE) {
1392 /* get all multicast packet */ 1393 /* get all multicast packet */
1393 mc_filter[1] = mc_filter[0] = 0xffffffff; 1394 mc_filter[1] = mc_filter[0] = 0xffffffff;
1394 lp->mc_list = dev->mc_list;
1395 lp->options |= OPTION_MULTICAST_ENABLE; 1395 lp->options |= OPTION_MULTICAST_ENABLE;
1396 amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF); 1396 amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
1397 return; 1397 return;
1398 } 1398 }
1399 if( dev->mc_count == 0 ){ 1399 if (netdev_mc_empty(dev)) {
1400 /* get only own packets */ 1400 /* get only own packets */
1401 mc_filter[1] = mc_filter[0] = 0; 1401 mc_filter[1] = mc_filter[0] = 0;
1402 lp->mc_list = NULL;
1403 lp->options &= ~OPTION_MULTICAST_ENABLE; 1402 lp->options &= ~OPTION_MULTICAST_ENABLE;
1404 amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF); 1403 amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
1405 /* disable promiscous mode */ 1404 /* disable promiscous mode */
@@ -1408,10 +1407,8 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
1408 } 1407 }
1409 /* load all the multicast addresses in the logic filter */ 1408 /* load all the multicast addresses in the logic filter */
1410 lp->options |= OPTION_MULTICAST_ENABLE; 1409 lp->options |= OPTION_MULTICAST_ENABLE;
1411 lp->mc_list = dev->mc_list;
1412 mc_filter[1] = mc_filter[0] = 0; 1410 mc_filter[1] = mc_filter[0] = 0;
1413 for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count; 1411 netdev_for_each_mc_addr(mc_ptr, dev) {
1414 i++, mc_ptr = mc_ptr->next) {
1415 bit_num = (ether_crc_le(ETH_ALEN, mc_ptr->dmi_addr) >> 26) & 0x3f; 1412 bit_num = (ether_crc_le(ETH_ALEN, mc_ptr->dmi_addr) >> 26) & 0x3f;
1416 mc_filter[bit_num >> 5] |= 1 << (bit_num & 31); 1413 mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
1417 } 1414 }
diff --git a/drivers/net/amd8111e.h b/drivers/net/amd8111e.h
index 28c60a71ed50..ac36eb6981e3 100644
--- a/drivers/net/amd8111e.h
+++ b/drivers/net/amd8111e.h
@@ -789,7 +789,6 @@ struct amd8111e_priv{
789 char opened; 789 char opened;
790 struct net_device_stats stats; 790 struct net_device_stats stats;
791 unsigned int drv_rx_errors; 791 unsigned int drv_rx_errors;
792 struct dev_mc_list* mc_list;
793 struct amd8111e_coalesce_conf coal_conf; 792 struct amd8111e_coalesce_conf coal_conf;
794 793
795 struct ipg_info ipg_data; 794 struct ipg_info ipg_data;
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index dbfbd3b7ff86..8ea4ec705bef 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -1125,7 +1125,6 @@ struct net_device * __init ltpc_probe(void)
1125 printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d. Using polled mode.\n",io,dma); 1125 printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d. Using polled mode.\n",io,dma);
1126 1126
1127 dev->netdev_ops = &ltpc_netdev; 1127 dev->netdev_ops = &ltpc_netdev;
1128 dev->mc_list = NULL;
1129 dev->base_addr = io; 1128 dev->base_addr = io;
1130 dev->irq = irq; 1129 dev->irq = irq;
1131 dev->dma = dma; 1130 dev->dma = dma;
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index dbf4de39754d..b68e1eb405ff 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -144,7 +144,7 @@ static void __devexit com20020pci_remove(struct pci_dev *pdev)
144 free_netdev(dev); 144 free_netdev(dev);
145} 145}
146 146
147static struct pci_device_id com20020pci_id_table[] = { 147static DEFINE_PCI_DEVICE_TABLE(com20020pci_id_table) = {
148 { 0x1571, 0xa001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 148 { 0x1571, 0xa001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
149 { 0x1571, 0xa002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 149 { 0x1571, 0xa002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
150 { 0x1571, 0xa003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 150 { 0x1571, 0xa003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index c35af3e106b1..08d8be47dae0 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -123,9 +123,7 @@ static void ariadne_reset(struct net_device *dev);
123static irqreturn_t ariadne_interrupt(int irq, void *data); 123static irqreturn_t ariadne_interrupt(int irq, void *data);
124static int ariadne_close(struct net_device *dev); 124static int ariadne_close(struct net_device *dev);
125static struct net_device_stats *ariadne_get_stats(struct net_device *dev); 125static struct net_device_stats *ariadne_get_stats(struct net_device *dev);
126#ifdef HAVE_MULTICAST
127static void set_multicast_list(struct net_device *dev); 126static void set_multicast_list(struct net_device *dev);
128#endif
129 127
130 128
131static void memcpyw(volatile u_short *dest, u_short *src, int len) 129static void memcpyw(volatile u_short *dest, u_short *src, int len)
@@ -821,7 +819,7 @@ static void set_multicast_list(struct net_device *dev)
821 lance->RDP = PROM; /* Set promiscuous mode */ 819 lance->RDP = PROM; /* Set promiscuous mode */
822 } else { 820 } else {
823 short multicast_table[4]; 821 short multicast_table[4];
824 int num_addrs = dev->mc_count; 822 int num_addrs = netdev_mc_count(dev);
825 int i; 823 int i;
826 /* We don't use the multicast table, but rely on upper-layer filtering. */ 824 /* We don't use the multicast table, but rely on upper-layer filtering. */
827 memset(multicast_table, (num_addrs == 0) ? 0 : -1, 825 memset(multicast_table, (num_addrs == 0) ? 0 : -1,
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 164b37e85eea..f1f58c5e27bf 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -351,13 +351,13 @@ static struct net_device_stats *am79c961_getstats (struct net_device *dev)
351 return &priv->stats; 351 return &priv->stats;
352} 352}
353 353
354static void am79c961_mc_hash(struct dev_mc_list *dmi, unsigned short *hash) 354static void am79c961_mc_hash(char *addr, unsigned short *hash)
355{ 355{
356 if (dmi->dmi_addrlen == ETH_ALEN && dmi->dmi_addr[0] & 0x01) { 356 if (addr[0] & 0x01) {
357 int idx, bit; 357 int idx, bit;
358 u32 crc; 358 u32 crc;
359 359
360 crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr); 360 crc = ether_crc_le(ETH_ALEN, addr);
361 361
362 idx = crc >> 30; 362 idx = crc >> 30;
363 bit = (crc >> 26) & 15; 363 bit = (crc >> 26) & 15;
@@ -387,8 +387,8 @@ static void am79c961_setmulticastlist (struct net_device *dev)
387 387
388 memset(multi_hash, 0x00, sizeof(multi_hash)); 388 memset(multi_hash, 0x00, sizeof(multi_hash));
389 389
390 for (dmi = dev->mc_list; dmi; dmi = dmi->next) 390 netdev_for_each_mc_addr(dmi, dev)
391 am79c961_mc_hash(dmi, multi_hash); 391 am79c961_mc_hash(dmi->dmi_addr, multi_hash);
392 } 392 }
393 393
394 spin_lock_irqsave(&priv->chip_lock, flags); 394 spin_lock_irqsave(&priv->chip_lock, flags);
@@ -680,7 +680,7 @@ static const struct net_device_ops am79c961_netdev_ops = {
680#endif 680#endif
681}; 681};
682 682
683static int __init am79c961_probe(struct platform_device *pdev) 683static int __devinit am79c961_probe(struct platform_device *pdev)
684{ 684{
685 struct resource *res; 685 struct resource *res;
686 struct net_device *dev; 686 struct net_device *dev;
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index c8bc60a7040c..8b23d5a175bf 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -558,14 +558,11 @@ static void at91ether_sethashtable(struct net_device *dev)
558{ 558{
559 struct dev_mc_list *curr; 559 struct dev_mc_list *curr;
560 unsigned long mc_filter[2]; 560 unsigned long mc_filter[2];
561 unsigned int i, bitnr; 561 unsigned int bitnr;
562 562
563 mc_filter[0] = mc_filter[1] = 0; 563 mc_filter[0] = mc_filter[1] = 0;
564 564
565 curr = dev->mc_list; 565 netdev_for_each_mc_addr(curr, dev) {
566 for (i = 0; i < dev->mc_count; i++, curr = curr->next) {
567 if (!curr) break; /* unexpected end of list */
568
569 bitnr = hash_get_index(curr->dmi_addr); 566 bitnr = hash_get_index(curr->dmi_addr);
570 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); 567 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
571 } 568 }
@@ -592,7 +589,7 @@ static void at91ether_set_multicast_list(struct net_device *dev)
592 at91_emac_write(AT91_EMAC_HSH, -1); 589 at91_emac_write(AT91_EMAC_HSH, -1);
593 at91_emac_write(AT91_EMAC_HSL, -1); 590 at91_emac_write(AT91_EMAC_HSL, -1);
594 cfg |= AT91_EMAC_MTI; 591 cfg |= AT91_EMAC_MTI;
595 } else if (dev->mc_count > 0) { /* Enable specific multicasts */ 592 } else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */
596 at91ether_sethashtable(dev); 593 at91ether_sethashtable(dev);
597 cfg |= AT91_EMAC_MTI; 594 cfg |= AT91_EMAC_MTI;
598 } else if (dev->flags & (~IFF_ALLMULTI)) { /* Disable all multicast mode */ 595 } else if (dev->flags & (~IFF_ALLMULTI)) { /* Disable all multicast mode */
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index b25467ac895c..bf72d57a0afd 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -9,6 +9,8 @@
9 * (at your option) any later version. 9 * (at your option) any later version.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
13
12#include <linux/dma-mapping.h> 14#include <linux/dma-mapping.h>
13#include <linux/module.h> 15#include <linux/module.h>
14#include <linux/kernel.h> 16#include <linux/kernel.h>
@@ -20,9 +22,9 @@
20#include <linux/moduleparam.h> 22#include <linux/moduleparam.h>
21#include <linux/platform_device.h> 23#include <linux/platform_device.h>
22#include <linux/delay.h> 24#include <linux/delay.h>
23#include <mach/ep93xx-regs.h> 25#include <linux/io.h>
24#include <mach/platform.h> 26
25#include <asm/io.h> 27#include <mach/hardware.h>
26 28
27#define DRV_MODULE_NAME "ep93xx-eth" 29#define DRV_MODULE_NAME "ep93xx-eth"
28#define DRV_MODULE_VERSION "0.1" 30#define DRV_MODULE_VERSION "0.1"
@@ -185,7 +187,47 @@ struct ep93xx_priv
185#define wrw(ep, off, val) __raw_writew((val), (ep)->base_addr + (off)) 187#define wrw(ep, off, val) __raw_writew((val), (ep)->base_addr + (off))
186#define wrl(ep, off, val) __raw_writel((val), (ep)->base_addr + (off)) 188#define wrl(ep, off, val) __raw_writel((val), (ep)->base_addr + (off))
187 189
188static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg); 190static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
191{
192 struct ep93xx_priv *ep = netdev_priv(dev);
193 int data;
194 int i;
195
196 wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);
197
198 for (i = 0; i < 10; i++) {
199 if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
200 break;
201 msleep(1);
202 }
203
204 if (i == 10) {
205 pr_info("mdio read timed out\n");
206 data = 0xffff;
207 } else {
208 data = rdl(ep, REG_MIIDATA);
209 }
210
211 return data;
212}
213
214static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
215{
216 struct ep93xx_priv *ep = netdev_priv(dev);
217 int i;
218
219 wrl(ep, REG_MIIDATA, data);
220 wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);
221
222 for (i = 0; i < 10; i++) {
223 if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
224 break;
225 msleep(1);
226 }
227
228 if (i == 10)
229 pr_info("mdio write timed out\n");
230}
189 231
190static struct net_device_stats *ep93xx_get_stats(struct net_device *dev) 232static struct net_device_stats *ep93xx_get_stats(struct net_device *dev)
191{ 233{
@@ -217,14 +259,11 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
217 rstat->rstat1 = 0; 259 rstat->rstat1 = 0;
218 260
219 if (!(rstat0 & RSTAT0_EOF)) 261 if (!(rstat0 & RSTAT0_EOF))
220 printk(KERN_CRIT "ep93xx_rx: not end-of-frame " 262 pr_crit("not end-of-frame %.8x %.8x\n", rstat0, rstat1);
221 " %.8x %.8x\n", rstat0, rstat1);
222 if (!(rstat0 & RSTAT0_EOB)) 263 if (!(rstat0 & RSTAT0_EOB))
223 printk(KERN_CRIT "ep93xx_rx: not end-of-buffer " 264 pr_crit("not end-of-buffer %.8x %.8x\n", rstat0, rstat1);
224 " %.8x %.8x\n", rstat0, rstat1);
225 if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry) 265 if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry)
226 printk(KERN_CRIT "ep93xx_rx: entry mismatch " 266 pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1);
227 " %.8x %.8x\n", rstat0, rstat1);
228 267
229 if (!(rstat0 & RSTAT0_RWE)) { 268 if (!(rstat0 & RSTAT0_RWE)) {
230 ep->stats.rx_errors++; 269 ep->stats.rx_errors++;
@@ -241,8 +280,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
241 280
242 length = rstat1 & RSTAT1_FRAME_LENGTH; 281 length = rstat1 & RSTAT1_FRAME_LENGTH;
243 if (length > MAX_PKT_SIZE) { 282 if (length > MAX_PKT_SIZE) {
244 printk(KERN_NOTICE "ep93xx_rx: invalid length " 283 pr_notice("invalid length %.8x %.8x\n", rstat0, rstat1);
245 " %.8x %.8x\n", rstat0, rstat1);
246 goto err; 284 goto err;
247 } 285 }
248 286
@@ -371,11 +409,9 @@ static void ep93xx_tx_complete(struct net_device *dev)
371 tstat->tstat0 = 0; 409 tstat->tstat0 = 0;
372 410
373 if (tstat0 & TSTAT0_FA) 411 if (tstat0 & TSTAT0_FA)
374 printk(KERN_CRIT "ep93xx_tx_complete: frame aborted " 412 pr_crit("frame aborted %.8x\n", tstat0);
375 " %.8x\n", tstat0);
376 if ((tstat0 & TSTAT0_BUFFER_INDEX) != entry) 413 if ((tstat0 & TSTAT0_BUFFER_INDEX) != entry)
377 printk(KERN_CRIT "ep93xx_tx_complete: entry mismatch " 414 pr_crit("entry mismatch %.8x\n", tstat0);
378 " %.8x\n", tstat0);
379 415
380 if (tstat0 & TSTAT0_TXWE) { 416 if (tstat0 & TSTAT0_TXWE) {
381 int length = ep->descs->tdesc[entry].tdesc1 & 0xfff; 417 int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;
@@ -536,7 +572,7 @@ static int ep93xx_start_hw(struct net_device *dev)
536 } 572 }
537 573
538 if (i == 10) { 574 if (i == 10) {
539 printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to reset\n"); 575 pr_crit("hw failed to reset\n");
540 return 1; 576 return 1;
541 } 577 }
542 578
@@ -581,7 +617,7 @@ static int ep93xx_start_hw(struct net_device *dev)
581 } 617 }
582 618
583 if (i == 10) { 619 if (i == 10) {
584 printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to start\n"); 620 pr_crit("hw failed to start\n");
585 return 1; 621 return 1;
586 } 622 }
587 623
@@ -617,7 +653,7 @@ static void ep93xx_stop_hw(struct net_device *dev)
617 } 653 }
618 654
619 if (i == 10) 655 if (i == 10)
620 printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to reset\n"); 656 pr_crit("hw failed to reset\n");
621} 657}
622 658
623static int ep93xx_open(struct net_device *dev) 659static int ep93xx_open(struct net_device *dev)
@@ -681,48 +717,6 @@ static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
681 return generic_mii_ioctl(&ep->mii, data, cmd, NULL); 717 return generic_mii_ioctl(&ep->mii, data, cmd, NULL);
682} 718}
683 719
684static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
685{
686 struct ep93xx_priv *ep = netdev_priv(dev);
687 int data;
688 int i;
689
690 wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);
691
692 for (i = 0; i < 10; i++) {
693 if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
694 break;
695 msleep(1);
696 }
697
698 if (i == 10) {
699 printk(KERN_INFO DRV_MODULE_NAME ": mdio read timed out\n");
700 data = 0xffff;
701 } else {
702 data = rdl(ep, REG_MIIDATA);
703 }
704
705 return data;
706}
707
708static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
709{
710 struct ep93xx_priv *ep = netdev_priv(dev);
711 int i;
712
713 wrl(ep, REG_MIIDATA, data);
714 wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);
715
716 for (i = 0; i < 10; i++) {
717 if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
718 break;
719 msleep(1);
720 }
721
722 if (i == 10)
723 printk(KERN_INFO DRV_MODULE_NAME ": mdio write timed out\n");
724}
725
726static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 720static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
727{ 721{
728 strcpy(info->driver, DRV_MODULE_NAME); 722 strcpy(info->driver, DRV_MODULE_NAME);
@@ -825,12 +819,19 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
825 struct ep93xx_eth_data *data; 819 struct ep93xx_eth_data *data;
826 struct net_device *dev; 820 struct net_device *dev;
827 struct ep93xx_priv *ep; 821 struct ep93xx_priv *ep;
822 struct resource *mem;
823 int irq;
828 int err; 824 int err;
829 825
830 if (pdev == NULL) 826 if (pdev == NULL)
831 return -ENODEV; 827 return -ENODEV;
832 data = pdev->dev.platform_data; 828 data = pdev->dev.platform_data;
833 829
830 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
831 irq = platform_get_irq(pdev, 0);
832 if (!mem || irq < 0)
833 return -ENXIO;
834
834 dev = ep93xx_dev_alloc(data); 835 dev = ep93xx_dev_alloc(data);
835 if (dev == NULL) { 836 if (dev == NULL) {
836 err = -ENOMEM; 837 err = -ENOMEM;
@@ -842,23 +843,21 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
842 843
843 platform_set_drvdata(pdev, dev); 844 platform_set_drvdata(pdev, dev);
844 845
845 ep->res = request_mem_region(pdev->resource[0].start, 846 ep->res = request_mem_region(mem->start, resource_size(mem),
846 pdev->resource[0].end - pdev->resource[0].start + 1, 847 dev_name(&pdev->dev));
847 dev_name(&pdev->dev));
848 if (ep->res == NULL) { 848 if (ep->res == NULL) {
849 dev_err(&pdev->dev, "Could not reserve memory region\n"); 849 dev_err(&pdev->dev, "Could not reserve memory region\n");
850 err = -ENOMEM; 850 err = -ENOMEM;
851 goto err_out; 851 goto err_out;
852 } 852 }
853 853
854 ep->base_addr = ioremap(pdev->resource[0].start, 854 ep->base_addr = ioremap(mem->start, resource_size(mem));
855 pdev->resource[0].end - pdev->resource[0].start);
856 if (ep->base_addr == NULL) { 855 if (ep->base_addr == NULL) {
857 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); 856 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
858 err = -EIO; 857 err = -EIO;
859 goto err_out; 858 goto err_out;
860 } 859 }
861 ep->irq = pdev->resource[1].start; 860 ep->irq = irq;
862 861
863 ep->mii.phy_id = data->phy_id; 862 ep->mii.phy_id = data->phy_id;
864 ep->mii.phy_id_mask = 0x1f; 863 ep->mii.phy_id_mask = 0x1f;
@@ -877,11 +876,8 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
877 goto err_out; 876 goto err_out;
878 } 877 }
879 878
880 printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, " 879 printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, %pM\n",
881 "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x.\n", dev->name, 880 dev->name, ep->irq, dev->dev_addr);
882 ep->irq, data->dev_addr[0], data->dev_addr[1],
883 data->dev_addr[2], data->dev_addr[3],
884 data->dev_addr[4], data->dev_addr[5]);
885 881
886 return 0; 882 return 0;
887 883
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index 1f7a69c929a6..d9de9bce2395 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -463,7 +463,7 @@ static void ether3_setmulticastlist(struct net_device *dev)
463 if (dev->flags & IFF_PROMISC) { 463 if (dev->flags & IFF_PROMISC) {
464 /* promiscuous mode */ 464 /* promiscuous mode */
465 priv(dev)->regs.config1 |= CFG1_RECVPROMISC; 465 priv(dev)->regs.config1 |= CFG1_RECVPROMISC;
466 } else if (dev->flags & IFF_ALLMULTI || dev->mc_count) { 466 } else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
467 priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI; 467 priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI;
468 } else 468 } else
469 priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD; 469 priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD;
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index c3dfbdd2cdcf..6e2ae1d06df1 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -735,22 +735,25 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
735static void eth_set_mcast_list(struct net_device *dev) 735static void eth_set_mcast_list(struct net_device *dev)
736{ 736{
737 struct port *port = netdev_priv(dev); 737 struct port *port = netdev_priv(dev);
738 struct dev_mc_list *mclist = dev->mc_list; 738 struct dev_mc_list *mclist;
739 u8 diffs[ETH_ALEN], *addr; 739 u8 diffs[ETH_ALEN], *addr;
740 int cnt = dev->mc_count, i; 740 int i;
741 741
742 if ((dev->flags & IFF_PROMISC) || !mclist || !cnt) { 742 if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
743 __raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN, 743 __raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
744 &port->regs->rx_control[0]); 744 &port->regs->rx_control[0]);
745 return; 745 return;
746 } 746 }
747 747
748 memset(diffs, 0, ETH_ALEN); 748 memset(diffs, 0, ETH_ALEN);
749 addr = mclist->dmi_addr; /* first MAC address */
750 749
751 while (--cnt && (mclist = mclist->next)) 750 addr = NULL;
751 netdev_for_each_mc_addr(mclist, dev) {
752 if (!addr)
753 addr = mclist->dmi_addr; /* first MAC address */
752 for (i = 0; i < ETH_ALEN; i++) 754 for (i = 0; i < ETH_ALEN; i++)
753 diffs[i] |= addr[i] ^ mclist->dmi_addr[i]; 755 diffs[i] |= addr[i] ^ mclist->dmi_addr[i];
756 }
754 757
755 for (i = 0; i < ETH_ALEN; i++) { 758 for (i = 0; i < ETH_ALEN; i++) {
756 __raw_writel(addr[i], &port->regs->mcast_addr[i]); 759 __raw_writel(addr[i], &port->regs->mcast_addr[i]);
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index be256b34cea8..8ca639127dbc 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -327,25 +327,24 @@ ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
327 */ 327 */
328static void 328static void
329ks8695_init_partial_multicast(struct ks8695_priv *ksp, 329ks8695_init_partial_multicast(struct ks8695_priv *ksp,
330 struct dev_mc_list *addr, 330 struct net_device *ndev)
331 int nr_addr)
332{ 331{
333 u32 low, high; 332 u32 low, high;
334 int i; 333 int i;
334 struct dev_mc_list *dmi;
335 335
336 for (i = 0; i < nr_addr; i++, addr = addr->next) { 336 i = 0;
337 /* Ran out of addresses? */ 337 netdev_for_each_mc_addr(dmi, ndev) {
338 if (!addr)
339 break;
340 /* Ran out of space in chip? */ 338 /* Ran out of space in chip? */
341 BUG_ON(i == KS8695_NR_ADDRESSES); 339 BUG_ON(i == KS8695_NR_ADDRESSES);
342 340
343 low = (addr->dmi_addr[2] << 24) | (addr->dmi_addr[3] << 16) | 341 low = (dmi->dmi_addr[2] << 24) | (dmi->dmi_addr[3] << 16) |
344 (addr->dmi_addr[4] << 8) | (addr->dmi_addr[5]); 342 (dmi->dmi_addr[4] << 8) | (dmi->dmi_addr[5]);
345 high = (addr->dmi_addr[0] << 8) | (addr->dmi_addr[1]); 343 high = (dmi->dmi_addr[0] << 8) | (dmi->dmi_addr[1]);
346 344
347 ks8695_writereg(ksp, KS8695_AAL_(i), low); 345 ks8695_writereg(ksp, KS8695_AAL_(i), low);
348 ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high); 346 ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
347 i++;
349 } 348 }
350 349
351 /* Clear the remaining Additional Station Addresses */ 350 /* Clear the remaining Additional Station Addresses */
@@ -1207,7 +1206,7 @@ ks8695_set_multicast(struct net_device *ndev)
1207 if (ndev->flags & IFF_ALLMULTI) { 1206 if (ndev->flags & IFF_ALLMULTI) {
1208 /* enable all multicast mode */ 1207 /* enable all multicast mode */
1209 ctrl |= DRXC_RM; 1208 ctrl |= DRXC_RM;
1210 } else if (ndev->mc_count > KS8695_NR_ADDRESSES) { 1209 } else if (netdev_mc_count(ndev) > KS8695_NR_ADDRESSES) {
1211 /* more specific multicast addresses than can be 1210 /* more specific multicast addresses than can be
1212 * handled in hardware 1211 * handled in hardware
1213 */ 1212 */
@@ -1215,8 +1214,7 @@ ks8695_set_multicast(struct net_device *ndev)
1215 } else { 1214 } else {
1216 /* enable specific multicasts */ 1215 /* enable specific multicasts */
1217 ctrl &= ~DRXC_RM; 1216 ctrl &= ~DRXC_RM;
1218 ks8695_init_partial_multicast(ksp, ndev->mc_list, 1217 ks8695_init_partial_multicast(ksp, ndev);
1219 ndev->mc_count);
1220 } 1218 }
1221 1219
1222 ks8695_writereg(ksp, KS8695_DRXC, ctrl); 1220 ks8695_writereg(ksp, KS8695_DRXC, ctrl);
@@ -1335,7 +1333,6 @@ ks8695_stop(struct net_device *ndev)
1335 1333
1336 netif_stop_queue(ndev); 1334 netif_stop_queue(ndev);
1337 napi_disable(&ksp->napi); 1335 napi_disable(&ksp->napi);
1338 netif_carrier_off(ndev);
1339 1336
1340 ks8695_shutdown(ksp); 1337 ks8695_shutdown(ksp);
1341 1338
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index b7f3866d546f..febd813c916d 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -858,10 +858,10 @@ static void w90p910_ether_set_multicast_list(struct net_device *dev)
858 858
859 if (dev->flags & IFF_PROMISC) 859 if (dev->flags & IFF_PROMISC)
860 rx_mode = CAMCMR_AUP | CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP; 860 rx_mode = CAMCMR_AUP | CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
861 else if ((dev->flags & IFF_ALLMULTI) || dev->mc_list) 861 else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
862 rx_mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP; 862 rx_mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
863 else 863 else
864 rx_mode = CAMCMR_ECMP | CAMCMR_ABP; 864 rx_mode = CAMCMR_ECMP | CAMCMR_ABP;
865 __raw_writel(rx_mode, ether->reg + REG_CAMCMR); 865 __raw_writel(rx_mode, ether->reg + REG_CAMCMR);
866} 866}
867 867
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index b14f4799d5d1..309843ab8869 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -839,21 +839,19 @@ set_rx_mode(struct net_device *dev)
839 if (dev->flags & IFF_PROMISC) { 839 if (dev->flags & IFF_PROMISC) {
840 memset(mc_filter, 0xff, sizeof(mc_filter)); 840 memset(mc_filter, 0xff, sizeof(mc_filter));
841 outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */ 841 outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
842 } else if (dev->mc_count > MC_FILTERBREAK || 842 } else if (netdev_mc_count(dev) > MC_FILTERBREAK ||
843 (dev->flags & IFF_ALLMULTI)) { 843 (dev->flags & IFF_ALLMULTI)) {
844 /* Too many to filter perfectly -- accept all multicasts. */ 844 /* Too many to filter perfectly -- accept all multicasts. */
845 memset(mc_filter, 0xff, sizeof(mc_filter)); 845 memset(mc_filter, 0xff, sizeof(mc_filter));
846 outb(2, ioaddr + RX_MODE); /* Use normal mode. */ 846 outb(2, ioaddr + RX_MODE); /* Use normal mode. */
847 } else if (dev->mc_count == 0) { 847 } else if (netdev_mc_empty(dev)) {
848 memset(mc_filter, 0x00, sizeof(mc_filter)); 848 memset(mc_filter, 0x00, sizeof(mc_filter));
849 outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */ 849 outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
850 } else { 850 } else {
851 struct dev_mc_list *mclist; 851 struct dev_mc_list *mclist;
852 int i;
853 852
854 memset(mc_filter, 0, sizeof(mc_filter)); 853 memset(mc_filter, 0, sizeof(mc_filter));
855 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 854 netdev_for_each_mc_addr(mclist, dev) {
856 i++, mclist = mclist->next) {
857 unsigned int bit = 855 unsigned int bit =
858 ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26; 856 ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
859 mc_filter[bit >> 3] |= (1 << bit); 857 mc_filter[bit >> 3] |= (1 << bit);
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index cc9ed8643910..280cfff48b49 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -1097,7 +1097,7 @@ static void set_multicast_list( struct net_device *dev )
1097 REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */ 1097 REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
1098 } else { 1098 } else {
1099 short multicast_table[4]; 1099 short multicast_table[4];
1100 int num_addrs = dev->mc_count; 1100 int num_addrs = netdev_mc_count(dev);
1101 int i; 1101 int i;
1102 /* We don't use the multicast table, but rely on upper-layer 1102 /* We don't use the multicast table, but rely on upper-layer
1103 * filtering. */ 1103 * filtering. */
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index efe5435bc3d3..84ae905bf732 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -313,6 +313,9 @@ enum atl1c_rss_type {
313enum atl1c_nic_type { 313enum atl1c_nic_type {
314 athr_l1c = 0, 314 athr_l1c = 0,
315 athr_l2c = 1, 315 athr_l2c = 1,
316 athr_l2c_b,
317 athr_l2c_b2,
318 athr_l1d,
316}; 319};
317 320
318enum atl1c_trans_queue { 321enum atl1c_trans_queue {
@@ -426,8 +429,12 @@ struct atl1c_hw {
426#define ATL1C_ASPM_L1_SUPPORT 0x0100 429#define ATL1C_ASPM_L1_SUPPORT 0x0100
427#define ATL1C_ASPM_CTRL_MON 0x0200 430#define ATL1C_ASPM_CTRL_MON 0x0200
428#define ATL1C_HIB_DISABLE 0x0400 431#define ATL1C_HIB_DISABLE 0x0400
429#define ATL1C_LINK_CAP_1000M 0x0800 432#define ATL1C_APS_MODE_ENABLE 0x0800
430#define ATL1C_FPGA_VERSION 0x8000 433#define ATL1C_LINK_EXT_SYNC 0x1000
434#define ATL1C_CLK_GATING_EN 0x2000
435#define ATL1C_FPGA_VERSION 0x8000
436 u16 link_cap_flags;
437#define ATL1C_LINK_CAP_1000M 0x0001
431 u16 cmb_tpd; 438 u16 cmb_tpd;
432 u16 cmb_rrd; 439 u16 cmb_rrd;
433 u16 cmb_rx_timer; /* 2us resolution */ 440 u16 cmb_rx_timer; /* 2us resolution */
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index 9b1e0eaebb5c..61a0f2ff11e9 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -37,7 +37,7 @@ static int atl1c_get_settings(struct net_device *netdev,
37 SUPPORTED_100baseT_Full | 37 SUPPORTED_100baseT_Full |
38 SUPPORTED_Autoneg | 38 SUPPORTED_Autoneg |
39 SUPPORTED_TP); 39 SUPPORTED_TP);
40 if (hw->ctrl_flags & ATL1C_LINK_CAP_1000M) 40 if (hw->link_cap_flags & ATL1C_LINK_CAP_1000M)
41 ecmd->supported |= SUPPORTED_1000baseT_Full; 41 ecmd->supported |= SUPPORTED_1000baseT_Full;
42 42
43 ecmd->advertising = ADVERTISED_TP; 43 ecmd->advertising = ADVERTISED_TP;
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index 3e69b940b8f7..f1389d664a21 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -70,17 +70,39 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
70 u32 otp_ctrl_data; 70 u32 otp_ctrl_data;
71 u32 twsi_ctrl_data; 71 u32 twsi_ctrl_data;
72 u8 eth_addr[ETH_ALEN]; 72 u8 eth_addr[ETH_ALEN];
73 u16 phy_data;
74 bool raise_vol = false;
73 75
74 /* init */ 76 /* init */
75 addr[0] = addr[1] = 0; 77 addr[0] = addr[1] = 0;
76 AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data); 78 AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
77 if (atl1c_check_eeprom_exist(hw)) { 79 if (atl1c_check_eeprom_exist(hw)) {
78 /* Enable OTP CLK */ 80 if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b) {
79 if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) { 81 /* Enable OTP CLK */
80 otp_ctrl_data |= OTP_CTRL_CLK_EN; 82 if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) {
81 AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data); 83 otp_ctrl_data |= OTP_CTRL_CLK_EN;
82 AT_WRITE_FLUSH(hw); 84 AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
83 msleep(1); 85 AT_WRITE_FLUSH(hw);
86 msleep(1);
87 }
88 }
89
90 if (hw->nic_type == athr_l2c_b ||
91 hw->nic_type == athr_l2c_b2 ||
92 hw->nic_type == athr_l1d) {
93 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00);
94 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
95 goto out;
96 phy_data &= 0xFF7F;
97 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
98
99 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
100 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
101 goto out;
102 phy_data |= 0x8;
103 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
104 udelay(20);
105 raise_vol = true;
84 } 106 }
85 107
86 AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data); 108 AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data);
@@ -96,11 +118,31 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
96 return -1; 118 return -1;
97 } 119 }
98 /* Disable OTP_CLK */ 120 /* Disable OTP_CLK */
99 if (otp_ctrl_data & OTP_CTRL_CLK_EN) { 121 if ((hw->nic_type == athr_l1c || hw->nic_type == athr_l2c)) {
100 otp_ctrl_data &= ~OTP_CTRL_CLK_EN; 122 if (otp_ctrl_data & OTP_CTRL_CLK_EN) {
101 AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data); 123 otp_ctrl_data &= ~OTP_CTRL_CLK_EN;
102 AT_WRITE_FLUSH(hw); 124 AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
103 msleep(1); 125 AT_WRITE_FLUSH(hw);
126 msleep(1);
127 }
128 }
129 if (raise_vol) {
130 if (hw->nic_type == athr_l2c_b ||
131 hw->nic_type == athr_l2c_b2 ||
132 hw->nic_type == athr_l1d) {
133 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00);
134 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
135 goto out;
136 phy_data |= 0x80;
137 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
138
139 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
140 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
141 goto out;
142 phy_data &= 0xFFF7;
143 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
144 udelay(20);
145 }
104 } 146 }
105 147
106 /* maybe MAC-address is from BIOS */ 148 /* maybe MAC-address is from BIOS */
@@ -114,6 +156,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
114 return 0; 156 return 0;
115 } 157 }
116 158
159out:
117 return -1; 160 return -1;
118} 161}
119 162
@@ -307,7 +350,7 @@ static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
307 mii_adv_data |= ADVERTISE_10HALF | ADVERTISE_10FULL | 350 mii_adv_data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
308 ADVERTISE_100HALF | ADVERTISE_100FULL; 351 ADVERTISE_100HALF | ADVERTISE_100FULL;
309 352
310 if (hw->ctrl_flags & ATL1C_LINK_CAP_1000M) { 353 if (hw->link_cap_flags & ATL1C_LINK_CAP_1000M) {
311 if (hw->autoneg_advertised & ADVERTISED_1000baseT_Half) 354 if (hw->autoneg_advertised & ADVERTISED_1000baseT_Half)
312 mii_giga_ctrl_data |= ADVERTISE_1000HALF; 355 mii_giga_ctrl_data |= ADVERTISE_1000HALF;
313 if (hw->autoneg_advertised & ADVERTISED_1000baseT_Full) 356 if (hw->autoneg_advertised & ADVERTISED_1000baseT_Full)
@@ -389,6 +432,7 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
389{ 432{
390 struct atl1c_adapter *adapter = hw->adapter; 433 struct atl1c_adapter *adapter = hw->adapter;
391 struct pci_dev *pdev = adapter->pdev; 434 struct pci_dev *pdev = adapter->pdev;
435 u16 phy_data;
392 u32 phy_ctrl_data = GPHY_CTRL_DEFAULT; 436 u32 phy_ctrl_data = GPHY_CTRL_DEFAULT;
393 u32 mii_ier_data = IER_LINK_UP | IER_LINK_DOWN; 437 u32 mii_ier_data = IER_LINK_UP | IER_LINK_DOWN;
394 int err; 438 int err;
@@ -404,6 +448,21 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
404 AT_WRITE_FLUSH(hw); 448 AT_WRITE_FLUSH(hw);
405 msleep(10); 449 msleep(10);
406 450
451 if (hw->nic_type == athr_l2c_b) {
452 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x0A);
453 atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data);
454 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xDFFF);
455 }
456
457 if (hw->nic_type == athr_l2c_b ||
458 hw->nic_type == athr_l2c_b2 ||
459 hw->nic_type == athr_l1d) {
460 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
461 atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data);
462 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xFFF7);
463 msleep(20);
464 }
465
407 /*Enable PHY LinkChange Interrupt */ 466 /*Enable PHY LinkChange Interrupt */
408 err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data); 467 err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data);
409 if (err) { 468 if (err) {
diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h
index c2c738df5c63..1eeb3ed9f0cb 100644
--- a/drivers/net/atl1c/atl1c_hw.h
+++ b/drivers/net/atl1c/atl1c_hw.h
@@ -57,6 +57,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
57#define REG_LINK_CTRL 0x68 57#define REG_LINK_CTRL 0x68
58#define LINK_CTRL_L0S_EN 0x01 58#define LINK_CTRL_L0S_EN 0x01
59#define LINK_CTRL_L1_EN 0x02 59#define LINK_CTRL_L1_EN 0x02
60#define LINK_CTRL_EXT_SYNC 0x80
60 61
61#define REG_VPD_CAP 0x6C 62#define REG_VPD_CAP 0x6C
62#define VPD_CAP_ID_MASK 0xff 63#define VPD_CAP_ID_MASK 0xff
@@ -156,6 +157,8 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
156#define PM_CTRL_PM_REQ_TIMER_SHIFT 20 157#define PM_CTRL_PM_REQ_TIMER_SHIFT 20
157#define PM_CTRL_LCKDET_TIMER_MASK 0x3F 158#define PM_CTRL_LCKDET_TIMER_MASK 0x3F
158#define PM_CTRL_LCKDET_TIMER_SHIFT 24 159#define PM_CTRL_LCKDET_TIMER_SHIFT 24
160#define PM_CTRL_EN_BUFS_RX_L0S 0x10000000
161#define PM_CTRL_SA_DLY_EN 0x20000000
159#define PM_CTRL_MAC_ASPM_CHK 0x40000000 162#define PM_CTRL_MAC_ASPM_CHK 0x40000000
160#define PM_CTRL_HOTRST 0x80000000 163#define PM_CTRL_HOTRST 0x80000000
161 164
@@ -314,6 +317,8 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
314#define MAC_CTRL_BC_EN 0x4000000 317#define MAC_CTRL_BC_EN 0x4000000
315#define MAC_CTRL_DBG 0x8000000 318#define MAC_CTRL_DBG 0x8000000
316#define MAC_CTRL_SINGLE_PAUSE_EN 0x10000000 319#define MAC_CTRL_SINGLE_PAUSE_EN 0x10000000
320#define MAC_CTRL_HASH_ALG_CRC32 0x20000000
321#define MAC_CTRL_SPEED_MODE_SW 0x40000000
317 322
318/* MAC IPG/IFG Control Register */ 323/* MAC IPG/IFG Control Register */
319#define REG_MAC_IPG_IFG 0x1484 324#define REG_MAC_IPG_IFG 0x1484
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 2f4be59b9c0b..50dc531a02d8 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -21,11 +21,18 @@
21 21
22#include "atl1c.h" 22#include "atl1c.h"
23 23
24#define ATL1C_DRV_VERSION "1.0.0.1-NAPI" 24#define ATL1C_DRV_VERSION "1.0.0.2-NAPI"
25char atl1c_driver_name[] = "atl1c"; 25char atl1c_driver_name[] = "atl1c";
26char atl1c_driver_version[] = ATL1C_DRV_VERSION; 26char atl1c_driver_version[] = ATL1C_DRV_VERSION;
27#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062 27#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062
28#define PCI_DEVICE_ID_ATTANSIC_L1C 0x1063 28#define PCI_DEVICE_ID_ATTANSIC_L1C 0x1063
29#define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */
30#define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */
31#define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */
32
33#define L2CB_V10 0xc0
34#define L2CB_V11 0xc1
35
29/* 36/*
30 * atl1c_pci_tbl - PCI Device ID Table 37 * atl1c_pci_tbl - PCI Device ID Table
31 * 38 *
@@ -35,9 +42,12 @@ char atl1c_driver_version[] = ATL1C_DRV_VERSION;
35 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 42 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
36 * Class, Class Mask, private data (not used) } 43 * Class, Class Mask, private data (not used) }
37 */ 44 */
38static struct pci_device_id atl1c_pci_tbl[] = { 45static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
39 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)}, 46 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)},
40 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)}, 47 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)},
48 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)},
49 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)},
50 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)},
41 /* required last entry */ 51 /* required last entry */
42 { 0 } 52 { 0 }
43}; 53};
@@ -367,7 +377,7 @@ static void atl1c_set_multi(struct net_device *netdev)
367 AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); 377 AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
368 378
369 /* comoute mc addresses' hash value ,and put it into hash table */ 379 /* comoute mc addresses' hash value ,and put it into hash table */
370 for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { 380 netdev_for_each_mc_addr(mc_ptr, netdev) {
371 hash_value = atl1c_hash_mc_addr(hw, mc_ptr->dmi_addr); 381 hash_value = atl1c_hash_mc_addr(hw, mc_ptr->dmi_addr);
372 atl1c_hash_set(hw, hash_value); 382 atl1c_hash_set(hw, hash_value);
373 } 383 }
@@ -593,11 +603,18 @@ static void atl1c_set_mac_type(struct atl1c_hw *hw)
593 case PCI_DEVICE_ID_ATTANSIC_L2C: 603 case PCI_DEVICE_ID_ATTANSIC_L2C:
594 hw->nic_type = athr_l2c; 604 hw->nic_type = athr_l2c;
595 break; 605 break;
596
597 case PCI_DEVICE_ID_ATTANSIC_L1C: 606 case PCI_DEVICE_ID_ATTANSIC_L1C:
598 hw->nic_type = athr_l1c; 607 hw->nic_type = athr_l1c;
599 break; 608 break;
600 609 case PCI_DEVICE_ID_ATHEROS_L2C_B:
610 hw->nic_type = athr_l2c_b;
611 break;
612 case PCI_DEVICE_ID_ATHEROS_L2C_B2:
613 hw->nic_type = athr_l2c_b2;
614 break;
615 case PCI_DEVICE_ID_ATHEROS_L1D:
616 hw->nic_type = athr_l1d;
617 break;
601 default: 618 default:
602 break; 619 break;
603 } 620 }
@@ -620,10 +637,13 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
620 hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT; 637 hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT;
621 if (link_ctrl_data & LINK_CTRL_L1_EN) 638 if (link_ctrl_data & LINK_CTRL_L1_EN)
622 hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT; 639 hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT;
640 if (link_ctrl_data & LINK_CTRL_EXT_SYNC)
641 hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC;
623 642
624 if (hw->nic_type == athr_l1c) { 643 if (hw->nic_type == athr_l1c ||
644 hw->nic_type == athr_l1d) {
625 hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON; 645 hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
626 hw->ctrl_flags |= ATL1C_LINK_CAP_1000M; 646 hw->link_cap_flags |= ATL1C_LINK_CAP_1000M;
627 } 647 }
628 return 0; 648 return 0;
629} 649}
@@ -1234,21 +1254,92 @@ static void atl1c_disable_l0s_l1(struct atl1c_hw *hw)
1234static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup) 1254static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
1235{ 1255{
1236 u32 pm_ctrl_data; 1256 u32 pm_ctrl_data;
1257 u32 link_ctrl_data;
1237 1258
1238 AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data); 1259 AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
1239 1260 AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
1240 pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1; 1261 pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
1262
1241 pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK << 1263 pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
1242 PM_CTRL_L1_ENTRY_TIMER_SHIFT); 1264 PM_CTRL_L1_ENTRY_TIMER_SHIFT);
1265 pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK <<
1266 PM_CTRL_LCKDET_TIMER_SHIFT);
1243 1267
1244 pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK; 1268 pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
1269 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1270 pm_ctrl_data |= PM_CTRL_RBER_EN;
1271 pm_ctrl_data |= PM_CTRL_SDES_EN;
1272
1273 if (hw->nic_type == athr_l2c_b ||
1274 hw->nic_type == athr_l1d ||
1275 hw->nic_type == athr_l2c_b2) {
1276 link_ctrl_data &= ~LINK_CTRL_EXT_SYNC;
1277 if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) {
1278 if (hw->nic_type == athr_l2c_b &&
1279 hw->revision_id == L2CB_V10)
1280 link_ctrl_data |= LINK_CTRL_EXT_SYNC;
1281 }
1282
1283 AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data);
1284
1285 pm_ctrl_data |= PM_CTRL_PCIE_RECV;
1286 pm_ctrl_data |= AT_ASPM_L1_TIMER << PM_CTRL_PM_REQ_TIMER_SHIFT;
1287 pm_ctrl_data &= ~PM_CTRL_EN_BUFS_RX_L0S;
1288 pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN;
1289 pm_ctrl_data &= ~PM_CTRL_HOTRST;
1290 pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT;
1291 pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1;
1292 }
1245 1293
1246 if (linkup) { 1294 if (linkup) {
1247 pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN; 1295 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1248 pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1; 1296 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1297 if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
1298 pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
1299 if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
1300 pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN;
1301
1302 if (hw->nic_type == athr_l2c_b ||
1303 hw->nic_type == athr_l1d ||
1304 hw->nic_type == athr_l2c_b2) {
1305 if (hw->nic_type == athr_l2c_b)
1306 if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE))
1307 pm_ctrl_data &= PM_CTRL_ASPM_L0S_EN;
1308 pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
1309 pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
1310 pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
1311 pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
1312 if (hw->adapter->link_speed == SPEED_100 ||
1313 hw->adapter->link_speed == SPEED_1000) {
1314 pm_ctrl_data &=
1315 ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
1316 PM_CTRL_L1_ENTRY_TIMER_SHIFT);
1317 if (hw->nic_type == athr_l1d)
1318 pm_ctrl_data |= 0xF <<
1319 PM_CTRL_L1_ENTRY_TIMER_SHIFT;
1320 else
1321 pm_ctrl_data |= 7 <<
1322 PM_CTRL_L1_ENTRY_TIMER_SHIFT;
1323 }
1324 } else {
1325 pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
1326 pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
1327 pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
1328 pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
1329 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1330 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1331 }
1332 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
1333 if (hw->adapter->link_speed == SPEED_10)
1334 if (hw->nic_type == athr_l1d)
1335 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0xB69D);
1336 else
1337 atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD);
1338 else if (hw->adapter->link_speed == SPEED_100)
1339 atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB2DD);
1340 else
1341 atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x96DD);
1249 1342
1250 pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
1251 pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
1252 } else { 1343 } else {
1253 pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN; 1344 pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
1254 pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN; 1345 pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
@@ -1302,6 +1393,10 @@ static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter)
1302 mac_ctrl_data |= MAC_CTRL_MC_ALL_EN; 1393 mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
1303 1394
1304 mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN; 1395 mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN;
1396 if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2) {
1397 mac_ctrl_data |= MAC_CTRL_SPEED_MODE_SW;
1398 mac_ctrl_data |= MAC_CTRL_HASH_ALG_CRC32;
1399 }
1305 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); 1400 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
1306} 1401}
1307 1402
@@ -2596,11 +2691,8 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
2596 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); 2691 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
2597 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); 2692 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
2598 if (netif_msg_probe(adapter)) 2693 if (netif_msg_probe(adapter))
2599 dev_dbg(&pdev->dev, 2694 dev_dbg(&pdev->dev, "mac address : %pM\n",
2600 "mac address : %02x-%02x-%02x-%02x-%02x-%02x\n", 2695 adapter->hw.mac_addr);
2601 adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
2602 adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
2603 adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
2604 2696
2605 atl1c_hw_set_mac_addr(&adapter->hw); 2697 atl1c_hw_set_mac_addr(&adapter->hw);
2606 INIT_WORK(&adapter->common_task, atl1c_common_task); 2698 INIT_WORK(&adapter->common_task, atl1c_common_task);
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c
index 4a7700620119..76cc043def8c 100644
--- a/drivers/net/atl1e/atl1e_hw.c
+++ b/drivers/net/atl1e/atl1e_hw.c
@@ -394,7 +394,6 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
394int atl1e_phy_commit(struct atl1e_hw *hw) 394int atl1e_phy_commit(struct atl1e_hw *hw)
395{ 395{
396 struct atl1e_adapter *adapter = hw->adapter; 396 struct atl1e_adapter *adapter = hw->adapter;
397 struct pci_dev *pdev = adapter->pdev;
398 int ret_val; 397 int ret_val;
399 u16 phy_data; 398 u16 phy_data;
400 399
@@ -415,12 +414,12 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
415 } 414 }
416 415
417 if (0 != (val & (MDIO_START | MDIO_BUSY))) { 416 if (0 != (val & (MDIO_START | MDIO_BUSY))) {
418 dev_err(&pdev->dev, 417 netdev_err(adapter->netdev,
419 "pcie linkdown at least for 25ms\n"); 418 "pcie linkdown at least for 25ms\n");
420 return ret_val; 419 return ret_val;
421 } 420 }
422 421
423 dev_err(&pdev->dev, "pcie linkup after %d ms\n", i); 422 netdev_err(adapter->netdev, "pcie linkup after %d ms\n", i);
424 } 423 }
425 return 0; 424 return 0;
426} 425}
@@ -428,7 +427,6 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
428int atl1e_phy_init(struct atl1e_hw *hw) 427int atl1e_phy_init(struct atl1e_hw *hw)
429{ 428{
430 struct atl1e_adapter *adapter = hw->adapter; 429 struct atl1e_adapter *adapter = hw->adapter;
431 struct pci_dev *pdev = adapter->pdev;
432 s32 ret_val; 430 s32 ret_val;
433 u16 phy_val; 431 u16 phy_val;
434 432
@@ -492,20 +490,22 @@ int atl1e_phy_init(struct atl1e_hw *hw)
492 /*Enable PHY LinkChange Interrupt */ 490 /*Enable PHY LinkChange Interrupt */
493 ret_val = atl1e_write_phy_reg(hw, MII_INT_CTRL, 0xC00); 491 ret_val = atl1e_write_phy_reg(hw, MII_INT_CTRL, 0xC00);
494 if (ret_val) { 492 if (ret_val) {
495 dev_err(&pdev->dev, "Error enable PHY linkChange Interrupt\n"); 493 netdev_err(adapter->netdev,
494 "Error enable PHY linkChange Interrupt\n");
496 return ret_val; 495 return ret_val;
497 } 496 }
498 /* setup AutoNeg parameters */ 497 /* setup AutoNeg parameters */
499 ret_val = atl1e_phy_setup_autoneg_adv(hw); 498 ret_val = atl1e_phy_setup_autoneg_adv(hw);
500 if (ret_val) { 499 if (ret_val) {
501 dev_err(&pdev->dev, "Error Setting up Auto-Negotiation\n"); 500 netdev_err(adapter->netdev,
501 "Error Setting up Auto-Negotiation\n");
502 return ret_val; 502 return ret_val;
503 } 503 }
504 /* SW.Reset & En-Auto-Neg to restart Auto-Neg*/ 504 /* SW.Reset & En-Auto-Neg to restart Auto-Neg*/
505 dev_dbg(&pdev->dev, "Restarting Auto-Neg"); 505 netdev_dbg(adapter->netdev, "Restarting Auto-Negotiation\n");
506 ret_val = atl1e_phy_commit(hw); 506 ret_val = atl1e_phy_commit(hw);
507 if (ret_val) { 507 if (ret_val) {
508 dev_err(&pdev->dev, "Error Resetting the phy"); 508 netdev_err(adapter->netdev, "Error resetting the phy\n");
509 return ret_val; 509 return ret_val;
510 } 510 }
511 511
@@ -559,9 +559,8 @@ int atl1e_reset_hw(struct atl1e_hw *hw)
559 } 559 }
560 560
561 if (timeout >= AT_HW_MAX_IDLE_DELAY) { 561 if (timeout >= AT_HW_MAX_IDLE_DELAY) {
562 dev_err(&pdev->dev, 562 netdev_err(adapter->netdev,
563 "MAC state machine cann't be idle since" 563 "MAC state machine can't be idle since disabled for 10ms second\n");
564 " disabled for 10ms second\n");
565 return AT_ERR_TIMEOUT; 564 return AT_ERR_TIMEOUT;
566 } 565 }
567 566
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 08f8c0969e9b..73302ae468aa 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -35,7 +35,7 @@ char atl1e_driver_version[] = DRV_VERSION;
35 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 35 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
36 * Class, Class Mask, private data (not used) } 36 * Class, Class Mask, private data (not used) }
37 */ 37 */
38static struct pci_device_id atl1e_pci_tbl[] = { 38static DEFINE_PCI_DEVICE_TABLE(atl1e_pci_tbl) = {
39 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)}, 39 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)},
40 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)}, 40 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)},
41 /* required last entry */ 41 /* required last entry */
@@ -164,11 +164,10 @@ static int atl1e_check_link(struct atl1e_adapter *adapter)
164{ 164{
165 struct atl1e_hw *hw = &adapter->hw; 165 struct atl1e_hw *hw = &adapter->hw;
166 struct net_device *netdev = adapter->netdev; 166 struct net_device *netdev = adapter->netdev;
167 struct pci_dev *pdev = adapter->pdev;
168 int err = 0; 167 int err = 0;
169 u16 speed, duplex, phy_data; 168 u16 speed, duplex, phy_data;
170 169
171 /* MII_BMSR must read twise */ 170 /* MII_BMSR must read twice */
172 atl1e_read_phy_reg(hw, MII_BMSR, &phy_data); 171 atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
173 atl1e_read_phy_reg(hw, MII_BMSR, &phy_data); 172 atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
174 if ((phy_data & BMSR_LSTATUS) == 0) { 173 if ((phy_data & BMSR_LSTATUS) == 0) {
@@ -195,12 +194,11 @@ static int atl1e_check_link(struct atl1e_adapter *adapter)
195 adapter->link_speed = speed; 194 adapter->link_speed = speed;
196 adapter->link_duplex = duplex; 195 adapter->link_duplex = duplex;
197 atl1e_setup_mac_ctrl(adapter); 196 atl1e_setup_mac_ctrl(adapter);
198 dev_info(&pdev->dev, 197 netdev_info(netdev,
199 "%s: %s NIC Link is Up<%d Mbps %s>\n", 198 "NIC Link is Up <%d Mbps %s Duplex>\n",
200 atl1e_driver_name, netdev->name, 199 adapter->link_speed,
201 adapter->link_speed, 200 adapter->link_duplex == FULL_DUPLEX ?
202 adapter->link_duplex == FULL_DUPLEX ? 201 "Full" : "Half");
203 "Full Duplex" : "Half Duplex");
204 } 202 }
205 203
206 if (!netif_carrier_ok(netdev)) { 204 if (!netif_carrier_ok(netdev)) {
@@ -230,7 +228,6 @@ static void atl1e_link_chg_task(struct work_struct *work)
230static void atl1e_link_chg_event(struct atl1e_adapter *adapter) 228static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
231{ 229{
232 struct net_device *netdev = adapter->netdev; 230 struct net_device *netdev = adapter->netdev;
233 struct pci_dev *pdev = adapter->pdev;
234 u16 phy_data = 0; 231 u16 phy_data = 0;
235 u16 link_up = 0; 232 u16 link_up = 0;
236 233
@@ -243,8 +240,7 @@ static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
243 if (!link_up) { 240 if (!link_up) {
244 if (netif_carrier_ok(netdev)) { 241 if (netif_carrier_ok(netdev)) {
245 /* old link state: Up */ 242 /* old link state: Up */
246 dev_info(&pdev->dev, "%s: %s NIC Link is Down\n", 243 netdev_info(netdev, "NIC Link is Down\n");
247 atl1e_driver_name, netdev->name);
248 adapter->link_speed = SPEED_0; 244 adapter->link_speed = SPEED_0;
249 netif_stop_queue(netdev); 245 netif_stop_queue(netdev);
250 } 246 }
@@ -311,7 +307,7 @@ static void atl1e_set_multi(struct net_device *netdev)
311 AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); 307 AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
312 308
313 /* comoute mc addresses' hash value ,and put it into hash table */ 309 /* comoute mc addresses' hash value ,and put it into hash table */
314 for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { 310 netdev_for_each_mc_addr(mc_ptr, netdev) {
315 hash_value = atl1e_hash_mc_addr(hw, mc_ptr->dmi_addr); 311 hash_value = atl1e_hash_mc_addr(hw, mc_ptr->dmi_addr);
316 atl1e_hash_set(hw, hash_value); 312 atl1e_hash_set(hw, hash_value);
317 } 313 }
@@ -321,10 +317,9 @@ static void atl1e_vlan_rx_register(struct net_device *netdev,
321 struct vlan_group *grp) 317 struct vlan_group *grp)
322{ 318{
323 struct atl1e_adapter *adapter = netdev_priv(netdev); 319 struct atl1e_adapter *adapter = netdev_priv(netdev);
324 struct pci_dev *pdev = adapter->pdev;
325 u32 mac_ctrl_data = 0; 320 u32 mac_ctrl_data = 0;
326 321
327 dev_dbg(&pdev->dev, "atl1e_vlan_rx_register\n"); 322 netdev_dbg(adapter->netdev, "%s\n", __func__);
328 323
329 atl1e_irq_disable(adapter); 324 atl1e_irq_disable(adapter);
330 325
@@ -345,9 +340,7 @@ static void atl1e_vlan_rx_register(struct net_device *netdev,
345 340
346static void atl1e_restore_vlan(struct atl1e_adapter *adapter) 341static void atl1e_restore_vlan(struct atl1e_adapter *adapter)
347{ 342{
348 struct pci_dev *pdev = adapter->pdev; 343 netdev_dbg(adapter->netdev, "%s\n", __func__);
349
350 dev_dbg(&pdev->dev, "atl1e_restore_vlan !");
351 atl1e_vlan_rx_register(adapter->netdev, adapter->vlgrp); 344 atl1e_vlan_rx_register(adapter->netdev, adapter->vlgrp);
352} 345}
353/* 346/*
@@ -391,7 +384,7 @@ static int atl1e_change_mtu(struct net_device *netdev, int new_mtu)
391 384
392 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || 385 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
393 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 386 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
394 dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); 387 netdev_warn(adapter->netdev, "invalid MTU setting\n");
395 return -EINVAL; 388 return -EINVAL;
396 } 389 }
397 /* set MTU */ 390 /* set MTU */
@@ -438,7 +431,6 @@ static int atl1e_mii_ioctl(struct net_device *netdev,
438 struct ifreq *ifr, int cmd) 431 struct ifreq *ifr, int cmd)
439{ 432{
440 struct atl1e_adapter *adapter = netdev_priv(netdev); 433 struct atl1e_adapter *adapter = netdev_priv(netdev);
441 struct pci_dev *pdev = adapter->pdev;
442 struct mii_ioctl_data *data = if_mii(ifr); 434 struct mii_ioctl_data *data = if_mii(ifr);
443 unsigned long flags; 435 unsigned long flags;
444 int retval = 0; 436 int retval = 0;
@@ -466,8 +458,8 @@ static int atl1e_mii_ioctl(struct net_device *netdev,
466 goto out; 458 goto out;
467 } 459 }
468 460
469 dev_dbg(&pdev->dev, "<atl1e_mii_ioctl> write %x %x", 461 netdev_dbg(adapter->netdev, "<atl1e_mii_ioctl> write %x %x\n",
470 data->reg_num, data->val_in); 462 data->reg_num, data->val_in);
471 if (atl1e_write_phy_reg(&adapter->hw, 463 if (atl1e_write_phy_reg(&adapter->hw,
472 data->reg_num, data->val_in)) { 464 data->reg_num, data->val_in)) {
473 retval = -EIO; 465 retval = -EIO;
@@ -602,7 +594,7 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
602 hw->dmaw_dly_cnt = 4; 594 hw->dmaw_dly_cnt = 4;
603 595
604 if (atl1e_alloc_queues(adapter)) { 596 if (atl1e_alloc_queues(adapter)) {
605 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 597 netdev_err(adapter->netdev, "Unable to allocate memory for queues\n");
606 return -ENOMEM; 598 return -ENOMEM;
607 } 599 }
608 600
@@ -800,8 +792,8 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
800 adapter->ring_size, &adapter->ring_dma); 792 adapter->ring_size, &adapter->ring_dma);
801 793
802 if (adapter->ring_vir_addr == NULL) { 794 if (adapter->ring_vir_addr == NULL) {
803 dev_err(&pdev->dev, "pci_alloc_consistent failed, " 795 netdev_err(adapter->netdev,
804 "size = D%d", size); 796 "pci_alloc_consistent failed, size = D%d\n", size);
805 return -ENOMEM; 797 return -ENOMEM;
806 } 798 }
807 799
@@ -817,7 +809,8 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
817 size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count); 809 size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
818 tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL); 810 tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL);
819 if (tx_ring->tx_buffer == NULL) { 811 if (tx_ring->tx_buffer == NULL) {
820 dev_err(&pdev->dev, "kzalloc failed , size = D%d", size); 812 netdev_err(adapter->netdev, "kzalloc failed, size = D%d\n",
813 size);
821 err = -ENOMEM; 814 err = -ENOMEM;
822 goto failed; 815 goto failed;
823 } 816 }
@@ -852,8 +845,8 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
852 } 845 }
853 846
854 if (unlikely(offset > adapter->ring_size)) { 847 if (unlikely(offset > adapter->ring_size)) {
855 dev_err(&pdev->dev, "offset(%d) > ring size(%d) !!\n", 848 netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
856 offset, adapter->ring_size); 849 offset, adapter->ring_size);
857 err = -1; 850 err = -1;
858 goto failed; 851 goto failed;
859 } 852 }
@@ -1077,7 +1070,6 @@ static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
1077static int atl1e_configure(struct atl1e_adapter *adapter) 1070static int atl1e_configure(struct atl1e_adapter *adapter)
1078{ 1071{
1079 struct atl1e_hw *hw = &adapter->hw; 1072 struct atl1e_hw *hw = &adapter->hw;
1080 struct pci_dev *pdev = adapter->pdev;
1081 1073
1082 u32 intr_status_data = 0; 1074 u32 intr_status_data = 0;
1083 1075
@@ -1130,8 +1122,8 @@ static int atl1e_configure(struct atl1e_adapter *adapter)
1130 1122
1131 intr_status_data = AT_READ_REG(hw, REG_ISR); 1123 intr_status_data = AT_READ_REG(hw, REG_ISR);
1132 if (unlikely((intr_status_data & ISR_PHY_LINKDOWN) != 0)) { 1124 if (unlikely((intr_status_data & ISR_PHY_LINKDOWN) != 0)) {
1133 dev_err(&pdev->dev, "atl1e_configure failed," 1125 netdev_err(adapter->netdev,
1134 "PCIE phy link down\n"); 1126 "atl1e_configure failed, PCIE phy link down\n");
1135 return -1; 1127 return -1;
1136 } 1128 }
1137 1129
@@ -1262,7 +1254,6 @@ static irqreturn_t atl1e_intr(int irq, void *data)
1262{ 1254{
1263 struct net_device *netdev = data; 1255 struct net_device *netdev = data;
1264 struct atl1e_adapter *adapter = netdev_priv(netdev); 1256 struct atl1e_adapter *adapter = netdev_priv(netdev);
1265 struct pci_dev *pdev = adapter->pdev;
1266 struct atl1e_hw *hw = &adapter->hw; 1257 struct atl1e_hw *hw = &adapter->hw;
1267 int max_ints = AT_MAX_INT_WORK; 1258 int max_ints = AT_MAX_INT_WORK;
1268 int handled = IRQ_NONE; 1259 int handled = IRQ_NONE;
@@ -1285,8 +1276,8 @@ static irqreturn_t atl1e_intr(int irq, void *data)
1285 handled = IRQ_HANDLED; 1276 handled = IRQ_HANDLED;
1286 /* check if PCIE PHY Link down */ 1277 /* check if PCIE PHY Link down */
1287 if (status & ISR_PHY_LINKDOWN) { 1278 if (status & ISR_PHY_LINKDOWN) {
1288 dev_err(&pdev->dev, 1279 netdev_err(adapter->netdev,
1289 "pcie phy linkdown %x\n", status); 1280 "pcie phy linkdown %x\n", status);
1290 if (netif_running(adapter->netdev)) { 1281 if (netif_running(adapter->netdev)) {
1291 /* reset MAC */ 1282 /* reset MAC */
1292 atl1e_irq_reset(adapter); 1283 atl1e_irq_reset(adapter);
@@ -1297,9 +1288,9 @@ static irqreturn_t atl1e_intr(int irq, void *data)
1297 1288
1298 /* check if DMA read/write error */ 1289 /* check if DMA read/write error */
1299 if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { 1290 if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
1300 dev_err(&pdev->dev, 1291 netdev_err(adapter->netdev,
1301 "PCIE DMA RW error (status = 0x%x)\n", 1292 "PCIE DMA RW error (status = 0x%x)\n",
1302 status); 1293 status);
1303 atl1e_irq_reset(adapter); 1294 atl1e_irq_reset(adapter);
1304 schedule_work(&adapter->reset_task); 1295 schedule_work(&adapter->reset_task);
1305 break; 1296 break;
@@ -1382,7 +1373,6 @@ static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter,
1382static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que, 1373static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
1383 int *work_done, int work_to_do) 1374 int *work_done, int work_to_do)
1384{ 1375{
1385 struct pci_dev *pdev = adapter->pdev;
1386 struct net_device *netdev = adapter->netdev; 1376 struct net_device *netdev = adapter->netdev;
1387 struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *) 1377 struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *)
1388 &adapter->rx_ring; 1378 &adapter->rx_ring;
@@ -1404,11 +1394,10 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
1404 rx_page->read_offset); 1394 rx_page->read_offset);
1405 /* check sequence number */ 1395 /* check sequence number */
1406 if (prrs->seq_num != rx_page_desc[que].rx_nxseq) { 1396 if (prrs->seq_num != rx_page_desc[que].rx_nxseq) {
1407 dev_err(&pdev->dev, 1397 netdev_err(netdev,
1408 "rx sequence number" 1398 "rx sequence number error (rx=%d) (expect=%d)\n",
1409 " error (rx=%d) (expect=%d)\n", 1399 prrs->seq_num,
1410 prrs->seq_num, 1400 rx_page_desc[que].rx_nxseq);
1411 rx_page_desc[que].rx_nxseq);
1412 rx_page_desc[que].rx_nxseq++; 1401 rx_page_desc[que].rx_nxseq++;
1413 /* just for debug use */ 1402 /* just for debug use */
1414 AT_WRITE_REG(&adapter->hw, REG_DEBUG_DATA0, 1403 AT_WRITE_REG(&adapter->hw, REG_DEBUG_DATA0,
@@ -1424,9 +1413,9 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
1424 RRS_ERR_DRIBBLE | RRS_ERR_CODE | 1413 RRS_ERR_DRIBBLE | RRS_ERR_CODE |
1425 RRS_ERR_TRUNC)) { 1414 RRS_ERR_TRUNC)) {
1426 /* hardware error, discard this packet*/ 1415 /* hardware error, discard this packet*/
1427 dev_err(&pdev->dev, 1416 netdev_err(netdev,
1428 "rx packet desc error %x\n", 1417 "rx packet desc error %x\n",
1429 *((u32 *)prrs + 1)); 1418 *((u32 *)prrs + 1));
1430 goto skip_pkt; 1419 goto skip_pkt;
1431 } 1420 }
1432 } 1421 }
@@ -1435,8 +1424,8 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
1435 RRS_PKT_SIZE_MASK) - 4; /* CRC */ 1424 RRS_PKT_SIZE_MASK) - 4; /* CRC */
1436 skb = netdev_alloc_skb_ip_align(netdev, packet_size); 1425 skb = netdev_alloc_skb_ip_align(netdev, packet_size);
1437 if (skb == NULL) { 1426 if (skb == NULL) {
1438 dev_warn(&pdev->dev, "%s: Memory squeeze," 1427 netdev_warn(netdev,
1439 "deferring packet.\n", netdev->name); 1428 "Memory squeeze, deferring packet\n");
1440 goto skip_pkt; 1429 goto skip_pkt;
1441 } 1430 }
1442 skb->dev = netdev; 1431 skb->dev = netdev;
@@ -1450,9 +1439,9 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
1450 u16 vlan_tag = (prrs->vtag >> 4) | 1439 u16 vlan_tag = (prrs->vtag >> 4) |
1451 ((prrs->vtag & 7) << 13) | 1440 ((prrs->vtag & 7) << 13) |
1452 ((prrs->vtag & 8) << 9); 1441 ((prrs->vtag & 8) << 9);
1453 dev_dbg(&pdev->dev, 1442 netdev_dbg(netdev,
1454 "RXD VLAN TAG<RRD>=0x%04x\n", 1443 "RXD VLAN TAG<RRD>=0x%04x\n",
1455 prrs->vtag); 1444 prrs->vtag);
1456 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 1445 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
1457 vlan_tag); 1446 vlan_tag);
1458 } else { 1447 } else {
@@ -1500,7 +1489,6 @@ static int atl1e_clean(struct napi_struct *napi, int budget)
1500{ 1489{
1501 struct atl1e_adapter *adapter = 1490 struct atl1e_adapter *adapter =
1502 container_of(napi, struct atl1e_adapter, napi); 1491 container_of(napi, struct atl1e_adapter, napi);
1503 struct pci_dev *pdev = adapter->pdev;
1504 u32 imr_data; 1492 u32 imr_data;
1505 int work_done = 0; 1493 int work_done = 0;
1506 1494
@@ -1519,8 +1507,8 @@ quit_polling:
1519 /* test debug */ 1507 /* test debug */
1520 if (test_bit(__AT_DOWN, &adapter->flags)) { 1508 if (test_bit(__AT_DOWN, &adapter->flags)) {
1521 atomic_dec(&adapter->irq_sem); 1509 atomic_dec(&adapter->irq_sem);
1522 dev_err(&pdev->dev, 1510 netdev_err(adapter->netdev,
1523 "atl1e_clean is called when AT_DOWN\n"); 1511 "atl1e_clean is called when AT_DOWN\n");
1524 } 1512 }
1525 /* reenable RX intr */ 1513 /* reenable RX intr */
1526 /*atl1e_irq_enable(adapter); */ 1514 /*atl1e_irq_enable(adapter); */
@@ -1618,7 +1606,6 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
1618static int atl1e_tso_csum(struct atl1e_adapter *adapter, 1606static int atl1e_tso_csum(struct atl1e_adapter *adapter,
1619 struct sk_buff *skb, struct atl1e_tpd_desc *tpd) 1607 struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
1620{ 1608{
1621 struct pci_dev *pdev = adapter->pdev;
1622 u8 hdr_len; 1609 u8 hdr_len;
1623 u32 real_len; 1610 u32 real_len;
1624 unsigned short offload_type; 1611 unsigned short offload_type;
@@ -1642,8 +1629,8 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter,
1642 hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); 1629 hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
1643 if (unlikely(skb->len == hdr_len)) { 1630 if (unlikely(skb->len == hdr_len)) {
1644 /* only xsum need */ 1631 /* only xsum need */
1645 dev_warn(&pdev->dev, 1632 netdev_warn(adapter->netdev,
1646 "IPV4 tso with zero data??\n"); 1633 "IPV4 tso with zero data??\n");
1647 goto check_sum; 1634 goto check_sum;
1648 } else { 1635 } else {
1649 ip_hdr(skb)->check = 0; 1636 ip_hdr(skb)->check = 0;
@@ -1672,8 +1659,8 @@ check_sum:
1672 1659
1673 cso = skb_transport_offset(skb); 1660 cso = skb_transport_offset(skb);
1674 if (unlikely(cso & 0x1)) { 1661 if (unlikely(cso & 0x1)) {
1675 dev_err(&adapter->pdev->dev, 1662 netdev_err(adapter->netdev,
1676 "pay load offset should not ant event number\n"); 1663 "payload offset should not ant event number\n");
1677 return -1; 1664 return -1;
1678 } else { 1665 } else {
1679 css = cso + skb->csum_offset; 1666 css = cso + skb->csum_offset;
@@ -1886,8 +1873,8 @@ static int atl1e_request_irq(struct atl1e_adapter *adapter)
1886 adapter->have_msi = true; 1873 adapter->have_msi = true;
1887 err = pci_enable_msi(adapter->pdev); 1874 err = pci_enable_msi(adapter->pdev);
1888 if (err) { 1875 if (err) {
1889 dev_dbg(&pdev->dev, 1876 netdev_dbg(adapter->netdev,
1890 "Unable to allocate MSI interrupt Error: %d\n", err); 1877 "Unable to allocate MSI interrupt Error: %d\n", err);
1891 adapter->have_msi = false; 1878 adapter->have_msi = false;
1892 } else 1879 } else
1893 netdev->irq = pdev->irq; 1880 netdev->irq = pdev->irq;
@@ -1898,13 +1885,13 @@ static int atl1e_request_irq(struct atl1e_adapter *adapter)
1898 err = request_irq(adapter->pdev->irq, atl1e_intr, flags, 1885 err = request_irq(adapter->pdev->irq, atl1e_intr, flags,
1899 netdev->name, netdev); 1886 netdev->name, netdev);
1900 if (err) { 1887 if (err) {
1901 dev_dbg(&pdev->dev, 1888 netdev_dbg(adapter->netdev,
1902 "Unable to allocate interrupt Error: %d\n", err); 1889 "Unable to allocate interrupt Error: %d\n", err);
1903 if (adapter->have_msi) 1890 if (adapter->have_msi)
1904 pci_disable_msi(adapter->pdev); 1891 pci_disable_msi(adapter->pdev);
1905 return err; 1892 return err;
1906 } 1893 }
1907 dev_dbg(&pdev->dev, "atl1e_request_irq OK\n"); 1894 netdev_dbg(adapter->netdev, "atl1e_request_irq OK\n");
1908 return err; 1895 return err;
1909} 1896}
1910 1897
@@ -2078,7 +2065,7 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
2078 (atl1e_write_phy_reg(hw, 2065 (atl1e_write_phy_reg(hw,
2079 MII_ADVERTISE, mii_advertise_data) != 0) || 2066 MII_ADVERTISE, mii_advertise_data) != 0) ||
2080 (atl1e_phy_commit(hw)) != 0) { 2067 (atl1e_phy_commit(hw)) != 0) {
2081 dev_dbg(&pdev->dev, "set phy register failed\n"); 2068 netdev_dbg(adapter->netdev, "set phy register failed\n");
2082 goto wol_dis; 2069 goto wol_dis;
2083 } 2070 }
2084 2071
@@ -2100,17 +2087,14 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
2100 } 2087 }
2101 2088
2102 if ((mii_bmsr_data & BMSR_LSTATUS) == 0) 2089 if ((mii_bmsr_data & BMSR_LSTATUS) == 0)
2103 dev_dbg(&pdev->dev, 2090 netdev_dbg(adapter->netdev,
2104 "%s: Link may change" 2091 "Link may change when suspend\n");
2105 "when suspend\n",
2106 atl1e_driver_name);
2107 } 2092 }
2108 wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN; 2093 wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
2109 /* only link up can wake up */ 2094 /* only link up can wake up */
2110 if (atl1e_write_phy_reg(hw, MII_INT_CTRL, 0x400) != 0) { 2095 if (atl1e_write_phy_reg(hw, MII_INT_CTRL, 0x400) != 0) {
2111 dev_dbg(&pdev->dev, "%s: read write phy " 2096 netdev_dbg(adapter->netdev,
2112 "register failed.\n", 2097 "read write phy register failed\n");
2113 atl1e_driver_name);
2114 goto wol_dis; 2098 goto wol_dis;
2115 } 2099 }
2116 } 2100 }
@@ -2131,9 +2115,8 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
2131 if (wufc & AT_WUFC_MAG) 2115 if (wufc & AT_WUFC_MAG)
2132 mac_ctrl_data |= MAC_CTRL_BC_EN; 2116 mac_ctrl_data |= MAC_CTRL_BC_EN;
2133 2117
2134 dev_dbg(&pdev->dev, 2118 netdev_dbg(adapter->netdev, "suspend MAC=0x%x\n",
2135 "%s: suspend MAC=0x%x\n", 2119 mac_ctrl_data);
2136 atl1e_driver_name, mac_ctrl_data);
2137 2120
2138 AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data); 2121 AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
2139 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); 2122 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
@@ -2183,8 +2166,8 @@ static int atl1e_resume(struct pci_dev *pdev)
2183 2166
2184 err = pci_enable_device(pdev); 2167 err = pci_enable_device(pdev);
2185 if (err) { 2168 if (err) {
2186 dev_err(&pdev->dev, "ATL1e: Cannot enable PCI" 2169 netdev_err(adapter->netdev,
2187 " device from suspend\n"); 2170 "Cannot enable PCI device from suspend\n");
2188 return err; 2171 return err;
2189 } 2172 }
2190 2173
@@ -2315,7 +2298,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
2315 2298
2316 err = atl1e_init_netdev(netdev, pdev); 2299 err = atl1e_init_netdev(netdev, pdev);
2317 if (err) { 2300 if (err) {
2318 dev_err(&pdev->dev, "init netdevice failed\n"); 2301 netdev_err(netdev, "init netdevice failed\n");
2319 goto err_init_netdev; 2302 goto err_init_netdev;
2320 } 2303 }
2321 adapter = netdev_priv(netdev); 2304 adapter = netdev_priv(netdev);
@@ -2326,7 +2309,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
2326 adapter->hw.hw_addr = pci_iomap(pdev, BAR_0, 0); 2309 adapter->hw.hw_addr = pci_iomap(pdev, BAR_0, 0);
2327 if (!adapter->hw.hw_addr) { 2310 if (!adapter->hw.hw_addr) {
2328 err = -EIO; 2311 err = -EIO;
2329 dev_err(&pdev->dev, "cannot map device registers\n"); 2312 netdev_err(netdev, "cannot map device registers\n");
2330 goto err_ioremap; 2313 goto err_ioremap;
2331 } 2314 }
2332 netdev->base_addr = (unsigned long)adapter->hw.hw_addr; 2315 netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
@@ -2356,7 +2339,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
2356 /* setup the private structure */ 2339 /* setup the private structure */
2357 err = atl1e_sw_init(adapter); 2340 err = atl1e_sw_init(adapter);
2358 if (err) { 2341 if (err) {
2359 dev_err(&pdev->dev, "net device private data init failed\n"); 2342 netdev_err(netdev, "net device private data init failed\n");
2360 goto err_sw_init; 2343 goto err_sw_init;
2361 } 2344 }
2362 2345
@@ -2372,22 +2355,19 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
2372 2355
2373 if (atl1e_read_mac_addr(&adapter->hw) != 0) { 2356 if (atl1e_read_mac_addr(&adapter->hw) != 0) {
2374 err = -EIO; 2357 err = -EIO;
2375 dev_err(&pdev->dev, "get mac address failed\n"); 2358 netdev_err(netdev, "get mac address failed\n");
2376 goto err_eeprom; 2359 goto err_eeprom;
2377 } 2360 }
2378 2361
2379 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); 2362 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
2380 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); 2363 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
2381 dev_dbg(&pdev->dev, "mac address : %02x-%02x-%02x-%02x-%02x-%02x\n", 2364 netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr);
2382 adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
2383 adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
2384 adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
2385 2365
2386 INIT_WORK(&adapter->reset_task, atl1e_reset_task); 2366 INIT_WORK(&adapter->reset_task, atl1e_reset_task);
2387 INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); 2367 INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
2388 err = register_netdev(netdev); 2368 err = register_netdev(netdev);
2389 if (err) { 2369 if (err) {
2390 dev_err(&pdev->dev, "register netdevice failed\n"); 2370 netdev_err(netdev, "register netdevice failed\n");
2391 goto err_register; 2371 goto err_register;
2392 } 2372 }
2393 2373
@@ -2488,8 +2468,8 @@ static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev)
2488 struct atl1e_adapter *adapter = netdev_priv(netdev); 2468 struct atl1e_adapter *adapter = netdev_priv(netdev);
2489 2469
2490 if (pci_enable_device(pdev)) { 2470 if (pci_enable_device(pdev)) {
2491 dev_err(&pdev->dev, 2471 netdev_err(adapter->netdev,
2492 "ATL1e: Cannot re-enable PCI device after reset.\n"); 2472 "Cannot re-enable PCI device after reset\n");
2493 return PCI_ERS_RESULT_DISCONNECT; 2473 return PCI_ERS_RESULT_DISCONNECT;
2494 } 2474 }
2495 pci_set_master(pdev); 2475 pci_set_master(pdev);
@@ -2517,8 +2497,8 @@ static void atl1e_io_resume(struct pci_dev *pdev)
2517 2497
2518 if (netif_running(netdev)) { 2498 if (netif_running(netdev)) {
2519 if (atl1e_up(adapter)) { 2499 if (atl1e_up(adapter)) {
2520 dev_err(&pdev->dev, 2500 netdev_err(adapter->netdev,
2521 "ATL1e: can't bring device back up after reset\n"); 2501 "can't bring device back up after reset\n");
2522 return; 2502 return;
2523 } 2503 }
2524 } 2504 }
diff --git a/drivers/net/atl1e/atl1e_param.c b/drivers/net/atl1e/atl1e_param.c
index b3be59fd3fb5..0ce60b6e7ef0 100644
--- a/drivers/net/atl1e/atl1e_param.c
+++ b/drivers/net/atl1e/atl1e_param.c
@@ -116,7 +116,7 @@ struct atl1e_option {
116 } arg; 116 } arg;
117}; 117};
118 118
119static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt, struct pci_dev *pdev) 119static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt, struct atl1e_adapter *adapter)
120{ 120{
121 if (*value == OPTION_UNSET) { 121 if (*value == OPTION_UNSET) {
122 *value = opt->def; 122 *value = opt->def;
@@ -127,16 +127,19 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
127 case enable_option: 127 case enable_option:
128 switch (*value) { 128 switch (*value) {
129 case OPTION_ENABLED: 129 case OPTION_ENABLED:
130 dev_info(&pdev->dev, "%s Enabled\n", opt->name); 130 netdev_info(adapter->netdev,
131 "%s Enabled\n", opt->name);
131 return 0; 132 return 0;
132 case OPTION_DISABLED: 133 case OPTION_DISABLED:
133 dev_info(&pdev->dev, "%s Disabled\n", opt->name); 134 netdev_info(adapter->netdev,
135 "%s Disabled\n", opt->name);
134 return 0; 136 return 0;
135 } 137 }
136 break; 138 break;
137 case range_option: 139 case range_option:
138 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { 140 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
139 dev_info(&pdev->dev, "%s set to %i\n", opt->name, *value); 141 netdev_info(adapter->netdev, "%s set to %i\n",
142 opt->name, *value);
140 return 0; 143 return 0;
141 } 144 }
142 break; 145 break;
@@ -148,8 +151,8 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
148 ent = &opt->arg.l.p[i]; 151 ent = &opt->arg.l.p[i];
149 if (*value == ent->i) { 152 if (*value == ent->i) {
150 if (ent->str[0] != '\0') 153 if (ent->str[0] != '\0')
151 dev_info(&pdev->dev, "%s\n", 154 netdev_info(adapter->netdev,
152 ent->str); 155 "%s\n", ent->str);
153 return 0; 156 return 0;
154 } 157 }
155 } 158 }
@@ -159,8 +162,8 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
159 BUG(); 162 BUG();
160 } 163 }
161 164
162 dev_info(&pdev->dev, "Invalid %s specified (%i) %s\n", 165 netdev_info(adapter->netdev, "Invalid %s specified (%i) %s\n",
163 opt->name, *value, opt->err); 166 opt->name, *value, opt->err);
164 *value = opt->def; 167 *value = opt->def;
165 return -1; 168 return -1;
166} 169}
@@ -176,11 +179,13 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
176 */ 179 */
177void __devinit atl1e_check_options(struct atl1e_adapter *adapter) 180void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
178{ 181{
179 struct pci_dev *pdev = adapter->pdev;
180 int bd = adapter->bd_number; 182 int bd = adapter->bd_number;
183
181 if (bd >= ATL1E_MAX_NIC) { 184 if (bd >= ATL1E_MAX_NIC) {
182 dev_notice(&pdev->dev, "no configuration for board #%i\n", bd); 185 netdev_notice(adapter->netdev,
183 dev_notice(&pdev->dev, "Using defaults for all values\n"); 186 "no configuration for board #%i\n", bd);
187 netdev_notice(adapter->netdev,
188 "Using defaults for all values\n");
184 } 189 }
185 190
186 { /* Transmit Ring Size */ 191 { /* Transmit Ring Size */
@@ -196,7 +201,7 @@ void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
196 int val; 201 int val;
197 if (num_tx_desc_cnt > bd) { 202 if (num_tx_desc_cnt > bd) {
198 val = tx_desc_cnt[bd]; 203 val = tx_desc_cnt[bd];
199 atl1e_validate_option(&val, &opt, pdev); 204 atl1e_validate_option(&val, &opt, adapter);
200 adapter->tx_ring.count = (u16) val & 0xFFFC; 205 adapter->tx_ring.count = (u16) val & 0xFFFC;
201 } else 206 } else
202 adapter->tx_ring.count = (u16)opt.def; 207 adapter->tx_ring.count = (u16)opt.def;
@@ -215,7 +220,7 @@ void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
215 int val; 220 int val;
216 if (num_rx_mem_size > bd) { 221 if (num_rx_mem_size > bd) {
217 val = rx_mem_size[bd]; 222 val = rx_mem_size[bd];
218 atl1e_validate_option(&val, &opt, pdev); 223 atl1e_validate_option(&val, &opt, adapter);
219 adapter->rx_ring.page_size = (u32)val * 1024; 224 adapter->rx_ring.page_size = (u32)val * 1024;
220 } else { 225 } else {
221 adapter->rx_ring.page_size = (u32)opt.def * 1024; 226 adapter->rx_ring.page_size = (u32)opt.def * 1024;
@@ -235,7 +240,7 @@ void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
235 int val; 240 int val;
236 if (num_int_mod_timer > bd) { 241 if (num_int_mod_timer > bd) {
237 val = int_mod_timer[bd]; 242 val = int_mod_timer[bd];
238 atl1e_validate_option(&val, &opt, pdev); 243 atl1e_validate_option(&val, &opt, adapter);
239 adapter->hw.imt = (u16) val; 244 adapter->hw.imt = (u16) val;
240 } else 245 } else
241 adapter->hw.imt = (u16)(opt.def); 246 adapter->hw.imt = (u16)(opt.def);
@@ -254,7 +259,7 @@ void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
254 int val; 259 int val;
255 if (num_media_type > bd) { 260 if (num_media_type > bd) {
256 val = media_type[bd]; 261 val = media_type[bd];
257 atl1e_validate_option(&val, &opt, pdev); 262 atl1e_validate_option(&val, &opt, adapter);
258 adapter->hw.media_type = (u16) val; 263 adapter->hw.media_type = (u16) val;
259 } else 264 } else
260 adapter->hw.media_type = (u16)(opt.def); 265 adapter->hw.media_type = (u16)(opt.def);
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index b6cf3263127c..9ba547069db3 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -232,7 +232,7 @@ static void __devinit atl1_check_options(struct atl1_adapter *adapter)
232/* 232/*
233 * atl1_pci_tbl - PCI Device ID Table 233 * atl1_pci_tbl - PCI Device ID Table
234 */ 234 */
235static const struct pci_device_id atl1_pci_tbl[] = { 235static DEFINE_PCI_DEVICE_TABLE(atl1_pci_tbl) = {
236 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)}, 236 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
237 /* required last entry */ 237 /* required last entry */
238 {0,} 238 {0,}
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index ec52529394ad..7061d7108f08 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -63,7 +63,7 @@ MODULE_VERSION(ATL2_DRV_VERSION);
63/* 63/*
64 * atl2_pci_tbl - PCI Device ID Table 64 * atl2_pci_tbl - PCI Device ID Table
65 */ 65 */
66static struct pci_device_id atl2_pci_tbl[] = { 66static DEFINE_PCI_DEVICE_TABLE(atl2_pci_tbl) = {
67 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)}, 67 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)},
68 /* required last entry */ 68 /* required last entry */
69 {0,} 69 {0,}
@@ -157,7 +157,7 @@ static void atl2_set_multi(struct net_device *netdev)
157 ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); 157 ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
158 158
159 /* comoute mc addresses' hash value ,and put it into hash table */ 159 /* comoute mc addresses' hash value ,and put it into hash table */
160 for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { 160 netdev_for_each_mc_addr(mc_ptr, netdev) {
161 hash_value = atl2_hash_mc_addr(hw, mc_ptr->dmi_addr); 161 hash_value = atl2_hash_mc_addr(hw, mc_ptr->dmi_addr);
162 atl2_hash_set(hw, hash_value); 162 atl2_hash_set(hw, hash_value);
163 } 163 }
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index 3dc014215679..72f3306352e2 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -144,7 +144,7 @@ static void atlx_set_multi(struct net_device *netdev)
144 iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); 144 iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
145 145
146 /* compute mc addresses' hash value ,and put it into hash table */ 146 /* compute mc addresses' hash value ,and put it into hash table */
147 for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { 147 netdev_for_each_mc_addr(mc_ptr, netdev) {
148 hash_value = atlx_hash_mc_addr(hw, mc_ptr->dmi_addr); 148 hash_value = atlx_hash_mc_addr(hw, mc_ptr->dmi_addr);
149 atlx_hash_set(hw, hash_value); 149 atlx_hash_set(hw, hash_value);
150 } 150 }
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 2f8261c9614a..6ad16205dc17 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -861,7 +861,7 @@ static void set_rx_mode_8002(struct net_device *dev)
861 struct net_local *lp = netdev_priv(dev); 861 struct net_local *lp = netdev_priv(dev);
862 long ioaddr = dev->base_addr; 862 long ioaddr = dev->base_addr;
863 863
864 if (dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC))) 864 if (!netdev_mc_empty(dev) || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC)))
865 lp->addr_mode = CMR2h_PROMISC; 865 lp->addr_mode = CMR2h_PROMISC;
866 else 866 else
867 lp->addr_mode = CMR2h_Normal; 867 lp->addr_mode = CMR2h_Normal;
@@ -877,7 +877,8 @@ static void set_rx_mode_8012(struct net_device *dev)
877 877
878 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 878 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
879 new_mode = CMR2h_PROMISC; 879 new_mode = CMR2h_PROMISC;
880 } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) { 880 } else if ((netdev_mc_count(dev) > 1000) ||
881 (dev->flags & IFF_ALLMULTI)) {
881 /* Too many to filter perfectly -- accept all multicasts. */ 882 /* Too many to filter perfectly -- accept all multicasts. */
882 memset(mc_filter, 0xff, sizeof(mc_filter)); 883 memset(mc_filter, 0xff, sizeof(mc_filter));
883 new_mode = CMR2h_Normal; 884 new_mode = CMR2h_Normal;
@@ -885,9 +886,7 @@ static void set_rx_mode_8012(struct net_device *dev)
885 struct dev_mc_list *mclist; 886 struct dev_mc_list *mclist;
886 887
887 memset(mc_filter, 0, sizeof(mc_filter)); 888 memset(mc_filter, 0, sizeof(mc_filter));
888 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 889 netdev_for_each_mc_addr(mclist, dev) {
889 i++, mclist = mclist->next)
890 {
891 int filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f; 890 int filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
892 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); 891 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
893 } 892 }
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 6e5a68ecde09..4da191b87b0d 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -952,21 +952,18 @@ static void au1000_multicast_list(struct net_device *dev)
952 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 952 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
953 aup->mac->control |= MAC_PROMISCUOUS; 953 aup->mac->control |= MAC_PROMISCUOUS;
954 } else if ((dev->flags & IFF_ALLMULTI) || 954 } else if ((dev->flags & IFF_ALLMULTI) ||
955 dev->mc_count > MULTICAST_FILTER_LIMIT) { 955 netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
956 aup->mac->control |= MAC_PASS_ALL_MULTI; 956 aup->mac->control |= MAC_PASS_ALL_MULTI;
957 aup->mac->control &= ~MAC_PROMISCUOUS; 957 aup->mac->control &= ~MAC_PROMISCUOUS;
958 printk(KERN_INFO "%s: Pass all multicast\n", dev->name); 958 printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
959 } else { 959 } else {
960 int i;
961 struct dev_mc_list *mclist; 960 struct dev_mc_list *mclist;
962 u32 mc_filter[2]; /* Multicast hash filter */ 961 u32 mc_filter[2]; /* Multicast hash filter */
963 962
964 mc_filter[1] = mc_filter[0] = 0; 963 mc_filter[1] = mc_filter[0] = 0;
965 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 964 netdev_for_each_mc_addr(mclist, dev)
966 i++, mclist = mclist->next) {
967 set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26, 965 set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26,
968 (long *)mc_filter); 966 (long *)mc_filter);
969 }
970 aup->mac->multi_hash_high = mc_filter[1]; 967 aup->mac->multi_hash_high = mc_filter[1];
971 aup->mac->multi_hash_low = mc_filter[0]; 968 aup->mac->multi_hash_low = mc_filter[0];
972 aup->mac->control &= ~MAC_PROMISCUOUS; 969 aup->mac->control &= ~MAC_PROMISCUOUS;
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 4869adb69586..332c60356285 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -10,6 +10,8 @@
10 * Distribute under GPL. 10 * Distribute under GPL.
11 */ 11 */
12 12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
13#include <linux/kernel.h> 15#include <linux/kernel.h>
14#include <linux/module.h> 16#include <linux/module.h>
15#include <linux/moduleparam.h> 17#include <linux/moduleparam.h>
@@ -34,7 +36,6 @@
34#include "b44.h" 36#include "b44.h"
35 37
36#define DRV_MODULE_NAME "b44" 38#define DRV_MODULE_NAME "b44"
37#define PFX DRV_MODULE_NAME ": "
38#define DRV_MODULE_VERSION "2.0" 39#define DRV_MODULE_VERSION "2.0"
39 40
40#define B44_DEF_MSG_ENABLE \ 41#define B44_DEF_MSG_ENABLE \
@@ -102,7 +103,7 @@ MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
102 103
103 104
104#ifdef CONFIG_B44_PCI 105#ifdef CONFIG_B44_PCI
105static const struct pci_device_id b44_pci_tbl[] = { 106static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
106 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) }, 107 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
107 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) }, 108 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
108 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) }, 109 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
@@ -189,11 +190,10 @@ static int b44_wait_bit(struct b44 *bp, unsigned long reg,
189 udelay(10); 190 udelay(10);
190 } 191 }
191 if (i == timeout) { 192 if (i == timeout) {
192 printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register " 193 if (net_ratelimit())
193 "%lx to %s.\n", 194 netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n",
194 bp->dev->name, 195 bit, reg, clear ? "clear" : "set");
195 bit, reg, 196
196 (clear ? "clear" : "set"));
197 return -ENODEV; 197 return -ENODEV;
198 } 198 }
199 return 0; 199 return 0;
@@ -333,13 +333,12 @@ static int b44_phy_reset(struct b44 *bp)
333 err = b44_readphy(bp, MII_BMCR, &val); 333 err = b44_readphy(bp, MII_BMCR, &val);
334 if (!err) { 334 if (!err) {
335 if (val & BMCR_RESET) { 335 if (val & BMCR_RESET) {
336 printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n", 336 netdev_err(bp->dev, "PHY Reset would not complete\n");
337 bp->dev->name);
338 err = -ENODEV; 337 err = -ENODEV;
339 } 338 }
340 } 339 }
341 340
342 return 0; 341 return err;
343} 342}
344 343
345static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags) 344static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
@@ -413,7 +412,7 @@ static void b44_wap54g10_workaround(struct b44 *bp)
413 } 412 }
414 return; 413 return;
415error: 414error:
416 printk(KERN_WARNING PFX "PHY: cannot reset MII transceiver isolate bit.\n"); 415 pr_warning("PHY: cannot reset MII transceiver isolate bit\n");
417} 416}
418#else 417#else
419static inline void b44_wap54g10_workaround(struct b44 *bp) 418static inline void b44_wap54g10_workaround(struct b44 *bp)
@@ -506,18 +505,15 @@ static void b44_stats_update(struct b44 *bp)
506static void b44_link_report(struct b44 *bp) 505static void b44_link_report(struct b44 *bp)
507{ 506{
508 if (!netif_carrier_ok(bp->dev)) { 507 if (!netif_carrier_ok(bp->dev)) {
509 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name); 508 netdev_info(bp->dev, "Link is down\n");
510 } else { 509 } else {
511 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n", 510 netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
512 bp->dev->name, 511 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
513 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10, 512 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
514 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half"); 513
515 514 netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
516 printk(KERN_INFO PFX "%s: Flow control is %s for TX and " 515 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
517 "%s for RX.\n", 516 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
518 bp->dev->name,
519 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
520 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
521 } 517 }
522} 518}
523 519
@@ -576,11 +572,9 @@ static void b44_check_phy(struct b44 *bp)
576 } 572 }
577 573
578 if (bmsr & BMSR_RFAULT) 574 if (bmsr & BMSR_RFAULT)
579 printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n", 575 netdev_warn(bp->dev, "Remote fault detected in PHY\n");
580 bp->dev->name);
581 if (bmsr & BMSR_JCD) 576 if (bmsr & BMSR_JCD)
582 printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n", 577 netdev_warn(bp->dev, "Jabber detected in PHY\n");
583 bp->dev->name);
584 } 578 }
585} 579}
586 580
@@ -815,7 +809,7 @@ static int b44_rx(struct b44 *bp, int budget)
815 struct sk_buff *copy_skb; 809 struct sk_buff *copy_skb;
816 810
817 b44_recycle_rx(bp, cons, bp->rx_prod); 811 b44_recycle_rx(bp, cons, bp->rx_prod);
818 copy_skb = dev_alloc_skb(len + 2); 812 copy_skb = netdev_alloc_skb(bp->dev, len + 2);
819 if (copy_skb == NULL) 813 if (copy_skb == NULL)
820 goto drop_it_no_recycle; 814 goto drop_it_no_recycle;
821 815
@@ -901,7 +895,7 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id)
901 handled = 1; 895 handled = 1;
902 896
903 if (unlikely(!netif_running(dev))) { 897 if (unlikely(!netif_running(dev))) {
904 printk(KERN_INFO "%s: late interrupt.\n", dev->name); 898 netdev_info(dev, "late interrupt\n");
905 goto irq_ack; 899 goto irq_ack;
906 } 900 }
907 901
@@ -926,8 +920,7 @@ static void b44_tx_timeout(struct net_device *dev)
926{ 920{
927 struct b44 *bp = netdev_priv(dev); 921 struct b44 *bp = netdev_priv(dev);
928 922
929 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n", 923 netdev_err(dev, "transmit timed out, resetting\n");
930 dev->name);
931 924
932 spin_lock_irq(&bp->lock); 925 spin_lock_irq(&bp->lock);
933 926
@@ -956,8 +949,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
956 /* This is a hard error, log it. */ 949 /* This is a hard error, log it. */
957 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) { 950 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
958 netif_stop_queue(dev); 951 netif_stop_queue(dev);
959 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n", 952 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
960 dev->name);
961 goto err_out; 953 goto err_out;
962 } 954 }
963 955
@@ -1333,7 +1325,7 @@ static void b44_halt(struct b44 *bp)
1333 /* reset PHY */ 1325 /* reset PHY */
1334 b44_phy_reset(bp); 1326 b44_phy_reset(bp);
1335 /* power down PHY */ 1327 /* power down PHY */
1336 printk(KERN_INFO PFX "%s: powering down PHY\n", bp->dev->name); 1328 netdev_info(bp->dev, "powering down PHY\n");
1337 bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN); 1329 bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
1338 /* now reset the chip, but without enabling the MAC&PHY 1330 /* now reset the chip, but without enabling the MAC&PHY
1339 * part of it. This has to be done _after_ we shut down the PHY */ 1331 * part of it. This has to be done _after_ we shut down the PHY */
@@ -1524,7 +1516,7 @@ static void b44_setup_pseudo_magicp(struct b44 *bp)
1524 1516
1525 pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL); 1517 pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1526 if (!pwol_pattern) { 1518 if (!pwol_pattern) {
1527 printk(KERN_ERR PFX "Memory not available for WOL\n"); 1519 pr_err("Memory not available for WOL\n");
1528 return; 1520 return;
1529 } 1521 }
1530 1522
@@ -1691,10 +1683,12 @@ static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1691 struct dev_mc_list *mclist; 1683 struct dev_mc_list *mclist;
1692 int i, num_ents; 1684 int i, num_ents;
1693 1685
1694 num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE); 1686 num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
1695 mclist = dev->mc_list; 1687 i = 0;
1696 for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) { 1688 netdev_for_each_mc_addr(mclist, dev) {
1697 __b44_cam_write(bp, mclist->dmi_addr, i + 1); 1689 if (i == num_ents)
1690 break;
1691 __b44_cam_write(bp, mclist->dmi_addr, i++ + 1);
1698 } 1692 }
1699 return i+1; 1693 return i+1;
1700} 1694}
@@ -1716,7 +1710,7 @@ static void __b44_set_rx_mode(struct net_device *dev)
1716 __b44_set_mac_addr(bp); 1710 __b44_set_mac_addr(bp);
1717 1711
1718 if ((dev->flags & IFF_ALLMULTI) || 1712 if ((dev->flags & IFF_ALLMULTI) ||
1719 (dev->mc_count > B44_MCAST_TABLE_SIZE)) 1713 (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
1720 val |= RXCONFIG_ALLMULTI; 1714 val |= RXCONFIG_ALLMULTI;
1721 else 1715 else
1722 i = __b44_load_mcast(bp, dev); 1716 i = __b44_load_mcast(bp, dev);
@@ -2097,7 +2091,7 @@ static int __devinit b44_get_invariants(struct b44 *bp)
2097 memcpy(bp->dev->dev_addr, addr, 6); 2091 memcpy(bp->dev->dev_addr, addr, 6);
2098 2092
2099 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){ 2093 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
2100 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n"); 2094 pr_err("Invalid MAC address found in EEPROM\n");
2101 return -EINVAL; 2095 return -EINVAL;
2102 } 2096 }
2103 2097
@@ -2142,12 +2136,12 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
2142 instance++; 2136 instance++;
2143 2137
2144 if (b44_version_printed++ == 0) 2138 if (b44_version_printed++ == 0)
2145 printk(KERN_INFO "%s", version); 2139 pr_info("%s", version);
2146 2140
2147 2141
2148 dev = alloc_etherdev(sizeof(*bp)); 2142 dev = alloc_etherdev(sizeof(*bp));
2149 if (!dev) { 2143 if (!dev) {
2150 dev_err(sdev->dev, "Etherdev alloc failed, aborting.\n"); 2144 dev_err(sdev->dev, "Etherdev alloc failed, aborting\n");
2151 err = -ENOMEM; 2145 err = -ENOMEM;
2152 goto out; 2146 goto out;
2153 } 2147 }
@@ -2186,13 +2180,13 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
2186 err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30)); 2180 err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
2187 if (err) { 2181 if (err) {
2188 dev_err(sdev->dev, 2182 dev_err(sdev->dev,
2189 "Required 30BIT DMA mask unsupported by the system.\n"); 2183 "Required 30BIT DMA mask unsupported by the system\n");
2190 goto err_out_powerdown; 2184 goto err_out_powerdown;
2191 } 2185 }
2192 err = b44_get_invariants(bp); 2186 err = b44_get_invariants(bp);
2193 if (err) { 2187 if (err) {
2194 dev_err(sdev->dev, 2188 dev_err(sdev->dev,
2195 "Problem fetching invariants of chip, aborting.\n"); 2189 "Problem fetching invariants of chip, aborting\n");
2196 goto err_out_powerdown; 2190 goto err_out_powerdown;
2197 } 2191 }
2198 2192
@@ -2212,7 +2206,7 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
2212 2206
2213 err = register_netdev(dev); 2207 err = register_netdev(dev);
2214 if (err) { 2208 if (err) {
2215 dev_err(sdev->dev, "Cannot register net device, aborting.\n"); 2209 dev_err(sdev->dev, "Cannot register net device, aborting\n");
2216 goto err_out_powerdown; 2210 goto err_out_powerdown;
2217 } 2211 }
2218 2212
@@ -2223,8 +2217,12 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
2223 */ 2217 */
2224 b44_chip_reset(bp, B44_CHIP_RESET_FULL); 2218 b44_chip_reset(bp, B44_CHIP_RESET_FULL);
2225 2219
2226 printk(KERN_INFO "%s: Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n", 2220 /* do a phy reset to test if there is an active phy */
2227 dev->name, dev->dev_addr); 2221 if (b44_phy_reset(bp) < 0)
2222 bp->phy_addr = B44_PHY_ADDR_NO_PHY;
2223
2224 netdev_info(dev, "Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n",
2225 dev->dev_addr);
2228 2226
2229 return 0; 2227 return 0;
2230 2228
@@ -2297,7 +2295,7 @@ static int b44_resume(struct ssb_device *sdev)
2297 2295
2298 rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev); 2296 rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2299 if (rc) { 2297 if (rc) {
2300 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name); 2298 netdev_err(dev, "request_irq failed\n");
2301 return rc; 2299 return rc;
2302 } 2300 }
2303 2301
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 0bd47d32ec42..8cdcab7655c0 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -619,7 +619,7 @@ static void bcm_enet_set_multicast_list(struct net_device *dev)
619 619
620 /* only 3 perfect match registers left, first one is used for 620 /* only 3 perfect match registers left, first one is used for
621 * own mac address */ 621 * own mac address */
622 if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 3) 622 if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
623 val |= ENET_RXCFG_ALLMCAST_MASK; 623 val |= ENET_RXCFG_ALLMCAST_MASK;
624 else 624 else
625 val &= ~ENET_RXCFG_ALLMCAST_MASK; 625 val &= ~ENET_RXCFG_ALLMCAST_MASK;
@@ -631,16 +631,13 @@ static void bcm_enet_set_multicast_list(struct net_device *dev)
631 return; 631 return;
632 } 632 }
633 633
634 for (i = 0, mc_list = dev->mc_list; 634 i = 0;
635 (mc_list != NULL) && (i < dev->mc_count) && (i < 3); 635 netdev_for_each_mc_addr(mc_list, dev) {
636 i++, mc_list = mc_list->next) {
637 u8 *dmi_addr; 636 u8 *dmi_addr;
638 u32 tmp; 637 u32 tmp;
639 638
640 /* filter non ethernet address */ 639 if (i == 3)
641 if (mc_list->dmi_addrlen != 6) 640 break;
642 continue;
643
644 /* update perfect match registers */ 641 /* update perfect match registers */
645 dmi_addr = mc_list->dmi_addr; 642 dmi_addr = mc_list->dmi_addr;
646 tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) | 643 tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
@@ -649,7 +646,7 @@ static void bcm_enet_set_multicast_list(struct net_device *dev)
649 646
650 tmp = (dmi_addr[0] << 8 | dmi_addr[1]); 647 tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
651 tmp |= ENET_PMH_DATAVALID_MASK; 648 tmp |= ENET_PMH_DATAVALID_MASK;
652 enet_writel(priv, tmp, ENET_PMH_REG(i + 1)); 649 enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
653 } 650 }
654 651
655 for (; i < 3; i++) { 652 for (; i < 3; i++) {
diff --git a/drivers/net/benet/Kconfig b/drivers/net/benet/Kconfig
index fdb6e81a4374..1a41a49bb619 100644
--- a/drivers/net/benet/Kconfig
+++ b/drivers/net/benet/Kconfig
@@ -1,6 +1,6 @@
1config BE2NET 1config BE2NET
2 tristate "ServerEngines' 10Gbps NIC - BladeEngine 2" 2 tristate "ServerEngines' 10Gbps NIC - BladeEngine"
3 depends on PCI && INET 3 depends on PCI && INET
4 help 4 help
5 This driver implements the NIC functionality for ServerEngines' 5 This driver implements the NIC functionality for ServerEngines'
6 10Gbps network adapter - BladeEngine 2. 6 10Gbps network adapter - BladeEngine.
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 5bc74590c73e..be81fb2d10f7 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -32,28 +32,26 @@
32 32
33#include "be_hw.h" 33#include "be_hw.h"
34 34
35#define DRV_VER "2.101.346u" 35#define DRV_VER "2.102.147u"
36#define DRV_NAME "be2net" 36#define DRV_NAME "be2net"
37#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" 37#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
38#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" 38#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
39#define OC_NAME "Emulex OneConnect 10Gbps NIC" 39#define OC_NAME "Emulex OneConnect 10Gbps NIC"
40#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)" 40#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)"
41#define DRV_DESC BE_NAME "Driver" 41#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
42 42
43#define BE_VENDOR_ID 0x19a2 43#define BE_VENDOR_ID 0x19a2
44#define BE_DEVICE_ID1 0x211 44#define BE_DEVICE_ID1 0x211
45#define BE_DEVICE_ID2 0x221 45#define BE_DEVICE_ID2 0x221
46#define OC_DEVICE_ID1 0x700 46#define OC_DEVICE_ID1 0x700
47#define OC_DEVICE_ID2 0x701 47#define OC_DEVICE_ID2 0x710
48#define OC_DEVICE_ID3 0x710
49 48
50static inline char *nic_name(struct pci_dev *pdev) 49static inline char *nic_name(struct pci_dev *pdev)
51{ 50{
52 switch (pdev->device) { 51 switch (pdev->device) {
53 case OC_DEVICE_ID1: 52 case OC_DEVICE_ID1:
54 case OC_DEVICE_ID2:
55 return OC_NAME; 53 return OC_NAME;
56 case OC_DEVICE_ID3: 54 case OC_DEVICE_ID2:
57 return OC_NAME1; 55 return OC_NAME1;
58 case BE_DEVICE_ID2: 56 case BE_DEVICE_ID2:
59 return BE3_NAME; 57 return BE3_NAME;
@@ -153,6 +151,7 @@ struct be_eq_obj {
153struct be_mcc_obj { 151struct be_mcc_obj {
154 struct be_queue_info q; 152 struct be_queue_info q;
155 struct be_queue_info cq; 153 struct be_queue_info cq;
154 bool rearm_cq;
156}; 155};
157 156
158struct be_drvr_stats { 157struct be_drvr_stats {
@@ -165,6 +164,7 @@ struct be_drvr_stats {
165 ulong be_tx_jiffies; 164 ulong be_tx_jiffies;
166 u64 be_tx_bytes; 165 u64 be_tx_bytes;
167 u64 be_tx_bytes_prev; 166 u64 be_tx_bytes_prev;
167 u64 be_tx_pkts;
168 u32 be_tx_rate; 168 u32 be_tx_rate;
169 169
170 u32 cache_barrier[16]; 170 u32 cache_barrier[16];
@@ -176,6 +176,7 @@ struct be_drvr_stats {
176 ulong be_rx_jiffies; 176 ulong be_rx_jiffies;
177 u64 be_rx_bytes; 177 u64 be_rx_bytes;
178 u64 be_rx_bytes_prev; 178 u64 be_rx_bytes_prev;
179 u64 be_rx_pkts;
179 u32 be_rx_rate; 180 u32 be_rx_rate;
180 /* number of non ether type II frames dropped where 181 /* number of non ether type II frames dropped where
181 * frame len > length field of Mac Hdr */ 182 * frame len > length field of Mac Hdr */
@@ -252,7 +253,8 @@ struct be_adapter {
252 bool rx_post_starved; /* Zero rx frags have been posted to BE */ 253 bool rx_post_starved; /* Zero rx frags have been posted to BE */
253 254
254 struct vlan_group *vlan_grp; 255 struct vlan_group *vlan_grp;
255 u16 num_vlans; 256 u16 vlans_added;
257 u16 max_vlans; /* Number of vlans supported */
256 u8 vlan_tag[VLAN_GROUP_ARRAY_LEN]; 258 u8 vlan_tag[VLAN_GROUP_ARRAY_LEN];
257 struct be_dma_mem mc_cmd_mem; 259 struct be_dma_mem mc_cmd_mem;
258 260
@@ -266,6 +268,7 @@ struct be_adapter {
266 u32 if_handle; /* Used to configure filtering */ 268 u32 if_handle; /* Used to configure filtering */
267 u32 pmac_id; /* MAC addr handle used by BE card */ 269 u32 pmac_id; /* MAC addr handle used by BE card */
268 270
271 bool eeh_err;
269 bool link_up; 272 bool link_up;
270 u32 port_num; 273 u32 port_num;
271 bool promiscuous; 274 bool promiscuous;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 006cb2efcd22..4b1f80519ca4 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -104,10 +104,26 @@ static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
104 return NULL; 104 return NULL;
105} 105}
106 106
107int be_process_mcc(struct be_adapter *adapter) 107void be_async_mcc_enable(struct be_adapter *adapter)
108{
109 spin_lock_bh(&adapter->mcc_cq_lock);
110
111 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
112 adapter->mcc_obj.rearm_cq = true;
113
114 spin_unlock_bh(&adapter->mcc_cq_lock);
115}
116
117void be_async_mcc_disable(struct be_adapter *adapter)
118{
119 adapter->mcc_obj.rearm_cq = false;
120}
121
122int be_process_mcc(struct be_adapter *adapter, int *status)
108{ 123{
109 struct be_mcc_compl *compl; 124 struct be_mcc_compl *compl;
110 int num = 0, status = 0; 125 int num = 0;
126 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
111 127
112 spin_lock_bh(&adapter->mcc_cq_lock); 128 spin_lock_bh(&adapter->mcc_cq_lock);
113 while ((compl = be_mcc_compl_get(adapter))) { 129 while ((compl = be_mcc_compl_get(adapter))) {
@@ -119,31 +135,31 @@ int be_process_mcc(struct be_adapter *adapter)
119 be_async_link_state_process(adapter, 135 be_async_link_state_process(adapter,
120 (struct be_async_event_link_state *) compl); 136 (struct be_async_event_link_state *) compl);
121 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { 137 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
122 status = be_mcc_compl_process(adapter, compl); 138 *status = be_mcc_compl_process(adapter, compl);
123 atomic_dec(&adapter->mcc_obj.q.used); 139 atomic_dec(&mcc_obj->q.used);
124 } 140 }
125 be_mcc_compl_use(compl); 141 be_mcc_compl_use(compl);
126 num++; 142 num++;
127 } 143 }
128 144
129 if (num)
130 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num);
131
132 spin_unlock_bh(&adapter->mcc_cq_lock); 145 spin_unlock_bh(&adapter->mcc_cq_lock);
133 return status; 146 return num;
134} 147}
135 148
136/* Wait till no more pending mcc requests are present */ 149/* Wait till no more pending mcc requests are present */
137static int be_mcc_wait_compl(struct be_adapter *adapter) 150static int be_mcc_wait_compl(struct be_adapter *adapter)
138{ 151{
139#define mcc_timeout 120000 /* 12s timeout */ 152#define mcc_timeout 120000 /* 12s timeout */
140 int i, status; 153 int i, num, status = 0;
154 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
155
141 for (i = 0; i < mcc_timeout; i++) { 156 for (i = 0; i < mcc_timeout; i++) {
142 status = be_process_mcc(adapter); 157 num = be_process_mcc(adapter, &status);
143 if (status) 158 if (num)
144 return status; 159 be_cq_notify(adapter, mcc_obj->cq.id,
160 mcc_obj->rearm_cq, num);
145 161
146 if (atomic_read(&adapter->mcc_obj.q.used) == 0) 162 if (atomic_read(&mcc_obj->q.used) == 0)
147 break; 163 break;
148 udelay(100); 164 udelay(100);
149 } 165 }
@@ -151,7 +167,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
151 dev_err(&adapter->pdev->dev, "mccq poll timed out\n"); 167 dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
152 return -1; 168 return -1;
153 } 169 }
154 return 0; 170 return status;
155} 171}
156 172
157/* Notify MCC requests and wait for completion */ 173/* Notify MCC requests and wait for completion */
@@ -167,7 +183,14 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
167 u32 ready; 183 u32 ready;
168 184
169 do { 185 do {
170 ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK; 186 ready = ioread32(db);
187 if (ready == 0xffffffff) {
188 dev_err(&adapter->pdev->dev,
189 "pci slot disconnected\n");
190 return -1;
191 }
192
193 ready &= MPU_MAILBOX_DB_RDY_MASK;
171 if (ready) 194 if (ready)
172 break; 195 break;
173 196
@@ -198,6 +221,11 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
198 struct be_mcc_mailbox *mbox = mbox_mem->va; 221 struct be_mcc_mailbox *mbox = mbox_mem->va;
199 struct be_mcc_compl *compl = &mbox->compl; 222 struct be_mcc_compl *compl = &mbox->compl;
200 223
224 /* wait for ready to be set */
225 status = be_mbox_db_ready_wait(adapter, db);
226 if (status != 0)
227 return status;
228
201 val |= MPU_MAILBOX_DB_HI_MASK; 229 val |= MPU_MAILBOX_DB_HI_MASK;
202 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */ 230 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
203 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; 231 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@ -397,6 +425,9 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
397 u8 *wrb; 425 u8 *wrb;
398 int status; 426 int status;
399 427
428 if (adapter->eeh_err)
429 return -EIO;
430
400 spin_lock(&adapter->mbox_lock); 431 spin_lock(&adapter->mbox_lock);
401 432
402 wrb = (u8 *)wrb_from_mbox(adapter); 433 wrb = (u8 *)wrb_from_mbox(adapter);
@@ -769,6 +800,9 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
769 u8 subsys = 0, opcode = 0; 800 u8 subsys = 0, opcode = 0;
770 int status; 801 int status;
771 802
803 if (adapter->eeh_err)
804 return -EIO;
805
772 spin_lock(&adapter->mbox_lock); 806 spin_lock(&adapter->mbox_lock);
773 807
774 wrb = wrb_from_mbox(adapter); 808 wrb = wrb_from_mbox(adapter);
@@ -857,6 +891,9 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
857 struct be_cmd_req_if_destroy *req; 891 struct be_cmd_req_if_destroy *req;
858 int status; 892 int status;
859 893
894 if (adapter->eeh_err)
895 return -EIO;
896
860 spin_lock(&adapter->mbox_lock); 897 spin_lock(&adapter->mbox_lock);
861 898
862 wrb = wrb_from_mbox(adapter); 899 wrb = wrb_from_mbox(adapter);
@@ -1097,8 +1134,7 @@ err:
1097 * (mc == NULL) => multicast promiscous 1134 * (mc == NULL) => multicast promiscous
1098 */ 1135 */
1099int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id, 1136int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
1100 struct dev_mc_list *mc_list, u32 mc_count, 1137 struct net_device *netdev, struct be_dma_mem *mem)
1101 struct be_dma_mem *mem)
1102{ 1138{
1103 struct be_mcc_wrb *wrb; 1139 struct be_mcc_wrb *wrb;
1104 struct be_cmd_req_mcast_mac_config *req = mem->va; 1140 struct be_cmd_req_mcast_mac_config *req = mem->va;
@@ -1125,13 +1161,14 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
1125 OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req)); 1161 OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
1126 1162
1127 req->interface_id = if_id; 1163 req->interface_id = if_id;
1128 if (mc_list) { 1164 if (netdev) {
1129 int i; 1165 int i;
1130 struct dev_mc_list *mc; 1166 struct dev_mc_list *mc;
1131 1167
1132 req->num_mac = cpu_to_le16(mc_count); 1168 req->num_mac = cpu_to_le16(netdev_mc_count(netdev));
1133 1169
1134 for (mc = mc_list, i = 0; mc; mc = mc->next, i++) 1170 i = 0;
1171 netdev_for_each_mc_addr(mc, netdev)
1135 memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN); 1172 memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
1136 } else { 1173 } else {
1137 req->promiscuous = 1; 1174 req->promiscuous = 1;
@@ -1375,7 +1412,7 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1375 u32 flash_type, u32 flash_opcode, u32 buf_size) 1412 u32 flash_type, u32 flash_opcode, u32 buf_size)
1376{ 1413{
1377 struct be_mcc_wrb *wrb; 1414 struct be_mcc_wrb *wrb;
1378 struct be_cmd_write_flashrom *req = cmd->va; 1415 struct be_cmd_write_flashrom *req;
1379 struct be_sge *sge; 1416 struct be_sge *sge;
1380 int status; 1417 int status;
1381 1418
@@ -1409,7 +1446,8 @@ err:
1409 return status; 1446 return status;
1410} 1447}
1411 1448
1412int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc) 1449int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
1450 int offset)
1413{ 1451{
1414 struct be_mcc_wrb *wrb; 1452 struct be_mcc_wrb *wrb;
1415 struct be_cmd_write_flashrom *req; 1453 struct be_cmd_write_flashrom *req;
@@ -1430,9 +1468,9 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc)
1430 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1468 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1431 OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4); 1469 OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
1432 1470
1433 req->params.op_type = cpu_to_le32(FLASHROM_TYPE_REDBOOT); 1471 req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
1434 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); 1472 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
1435 req->params.offset = 0x3FFFC; 1473 req->params.offset = offset;
1436 req->params.data_buf_size = 0x4; 1474 req->params.data_buf_size = 0x4;
1437 1475
1438 status = be_mcc_notify_wait(adapter); 1476 status = be_mcc_notify_wait(adapter);
@@ -1608,3 +1646,33 @@ err:
1608 spin_unlock_bh(&adapter->mcc_lock); 1646 spin_unlock_bh(&adapter->mcc_lock);
1609 return status; 1647 return status;
1610} 1648}
1649
1650extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1651 struct be_dma_mem *nonemb_cmd)
1652{
1653 struct be_mcc_wrb *wrb;
1654 struct be_cmd_req_seeprom_read *req;
1655 struct be_sge *sge;
1656 int status;
1657
1658 spin_lock_bh(&adapter->mcc_lock);
1659
1660 wrb = wrb_from_mccq(adapter);
1661 req = nonemb_cmd->va;
1662 sge = nonembedded_sgl(wrb);
1663
1664 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1665 OPCODE_COMMON_SEEPROM_READ);
1666
1667 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1668 OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
1669
1670 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1671 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1672 sge->len = cpu_to_le32(nonemb_cmd->size);
1673
1674 status = be_mcc_notify_wait(adapter);
1675
1676 spin_unlock_bh(&adapter->mcc_lock);
1677 return status;
1678}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 13b33c841083..cce61f9a3714 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -124,6 +124,7 @@ struct be_mcc_mailbox {
124#define OPCODE_COMMON_CQ_CREATE 12 124#define OPCODE_COMMON_CQ_CREATE 12
125#define OPCODE_COMMON_EQ_CREATE 13 125#define OPCODE_COMMON_EQ_CREATE 13
126#define OPCODE_COMMON_MCC_CREATE 21 126#define OPCODE_COMMON_MCC_CREATE 21
127#define OPCODE_COMMON_SEEPROM_READ 30
127#define OPCODE_COMMON_NTWK_RX_FILTER 34 128#define OPCODE_COMMON_NTWK_RX_FILTER 34
128#define OPCODE_COMMON_GET_FW_VERSION 35 129#define OPCODE_COMMON_GET_FW_VERSION 35
129#define OPCODE_COMMON_SET_FLOW_CONTROL 36 130#define OPCODE_COMMON_SET_FLOW_CONTROL 36
@@ -855,6 +856,19 @@ struct be_cmd_resp_ddrdma_test {
855 u8 rcv_buff[4096]; 856 u8 rcv_buff[4096];
856}; 857};
857 858
859/*********************** SEEPROM Read ***********************/
860
861#define BE_READ_SEEPROM_LEN 1024
862struct be_cmd_req_seeprom_read {
863 struct be_cmd_req_hdr hdr;
864 u8 rsvd0[BE_READ_SEEPROM_LEN];
865};
866
867struct be_cmd_resp_seeprom_read {
868 struct be_cmd_req_hdr hdr;
869 u8 seeprom_data[BE_READ_SEEPROM_LEN];
870};
871
858extern int be_pci_fnum_get(struct be_adapter *adapter); 872extern int be_pci_fnum_get(struct be_adapter *adapter);
859extern int be_cmd_POST(struct be_adapter *adapter); 873extern int be_cmd_POST(struct be_adapter *adapter);
860extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 874extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -898,8 +912,7 @@ extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
898extern int be_cmd_promiscuous_config(struct be_adapter *adapter, 912extern int be_cmd_promiscuous_config(struct be_adapter *adapter,
899 u8 port_num, bool en); 913 u8 port_num, bool en);
900extern int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id, 914extern int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
901 struct dev_mc_list *mc_list, u32 mc_count, 915 struct net_device *netdev, struct be_dma_mem *mem);
902 struct be_dma_mem *mem);
903extern int be_cmd_set_flow_control(struct be_adapter *adapter, 916extern int be_cmd_set_flow_control(struct be_adapter *adapter,
904 u32 tx_fc, u32 rx_fc); 917 u32 tx_fc, u32 rx_fc);
905extern int be_cmd_get_flow_control(struct be_adapter *adapter, 918extern int be_cmd_get_flow_control(struct be_adapter *adapter,
@@ -907,7 +920,7 @@ extern int be_cmd_get_flow_control(struct be_adapter *adapter,
907extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, 920extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
908 u32 *port_num, u32 *cap); 921 u32 *port_num, u32 *cap);
909extern int be_cmd_reset_function(struct be_adapter *adapter); 922extern int be_cmd_reset_function(struct be_adapter *adapter);
910extern int be_process_mcc(struct be_adapter *adapter); 923extern int be_process_mcc(struct be_adapter *adapter, int *status);
911extern int be_cmd_set_beacon_state(struct be_adapter *adapter, 924extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
912 u8 port_num, u8 beacon, u8 status, u8 state); 925 u8 port_num, u8 beacon, u8 status, u8 state);
913extern int be_cmd_get_beacon_state(struct be_adapter *adapter, 926extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
@@ -917,15 +930,21 @@ extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
917extern int be_cmd_write_flashrom(struct be_adapter *adapter, 930extern int be_cmd_write_flashrom(struct be_adapter *adapter,
918 struct be_dma_mem *cmd, u32 flash_oper, 931 struct be_dma_mem *cmd, u32 flash_oper,
919 u32 flash_opcode, u32 buf_size); 932 u32 flash_opcode, u32 buf_size);
920extern int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc); 933int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
934 int offset);
921extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, 935extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
922 struct be_dma_mem *nonemb_cmd); 936 struct be_dma_mem *nonemb_cmd);
923extern int be_cmd_fw_init(struct be_adapter *adapter); 937extern int be_cmd_fw_init(struct be_adapter *adapter);
924extern int be_cmd_fw_clean(struct be_adapter *adapter); 938extern int be_cmd_fw_clean(struct be_adapter *adapter);
939extern void be_async_mcc_enable(struct be_adapter *adapter);
940extern void be_async_mcc_disable(struct be_adapter *adapter);
925extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, 941extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
926 u32 loopback_type, u32 pkt_size, 942 u32 loopback_type, u32 pkt_size,
927 u32 num_pkts, u64 pattern); 943 u32 num_pkts, u64 pattern);
928extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, 944extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
929 u32 byte_cnt, struct be_dma_mem *cmd); 945 u32 byte_cnt, struct be_dma_mem *cmd);
946extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
947 struct be_dma_mem *nonemb_cmd);
930extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, 948extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
931 u8 loopback_type, u8 enable); 949 u8 loopback_type, u8 enable);
950
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 5d001c4deac1..9560d48944ab 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -112,6 +112,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
112 "PHY Loopback test", 112 "PHY Loopback test",
113 "External Loopback test", 113 "External Loopback test",
114 "DDR DMA test" 114 "DDR DMA test"
115 "Link test"
115}; 116};
116 117
117#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests) 118#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
@@ -529,6 +530,9 @@ static void
529be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) 530be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
530{ 531{
531 struct be_adapter *adapter = netdev_priv(netdev); 532 struct be_adapter *adapter = netdev_priv(netdev);
533 bool link_up;
534 u8 mac_speed = 0;
535 u16 qos_link_speed = 0;
532 536
533 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM); 537 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
534 538
@@ -545,12 +549,20 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
545 &data[2]) != 0) { 549 &data[2]) != 0) {
546 test->flags |= ETH_TEST_FL_FAILED; 550 test->flags |= ETH_TEST_FL_FAILED;
547 } 551 }
552 }
548 553
549 data[3] = be_test_ddr_dma(adapter); 554 if (be_test_ddr_dma(adapter) != 0) {
550 if (data[3] != 0) 555 data[3] = 1;
551 test->flags |= ETH_TEST_FL_FAILED; 556 test->flags |= ETH_TEST_FL_FAILED;
552 } 557 }
553 558
559 if (be_cmd_link_status_query(adapter, &link_up, &mac_speed,
560 &qos_link_speed) != 0) {
561 test->flags |= ETH_TEST_FL_FAILED;
562 data[4] = -1;
563 } else if (mac_speed) {
564 data[4] = 1;
565 }
554} 566}
555 567
556static int 568static int
@@ -567,12 +579,57 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
567 return be_load_fw(adapter, file_name); 579 return be_load_fw(adapter, file_name);
568} 580}
569 581
582static int
583be_get_eeprom_len(struct net_device *netdev)
584{
585 return BE_READ_SEEPROM_LEN;
586}
587
588static int
589be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
590 uint8_t *data)
591{
592 struct be_adapter *adapter = netdev_priv(netdev);
593 struct be_dma_mem eeprom_cmd;
594 struct be_cmd_resp_seeprom_read *resp;
595 int status;
596
597 if (!eeprom->len)
598 return -EINVAL;
599
600 eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
601
602 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
603 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
604 eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
605 &eeprom_cmd.dma);
606
607 if (!eeprom_cmd.va) {
608 dev_err(&adapter->pdev->dev,
609 "Memory allocation failure. Could not read eeprom\n");
610 return -ENOMEM;
611 }
612
613 status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
614
615 if (!status) {
616 resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
617 memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
618 }
619 pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
620 eeprom_cmd.dma);
621
622 return status;
623}
624
570const struct ethtool_ops be_ethtool_ops = { 625const struct ethtool_ops be_ethtool_ops = {
571 .get_settings = be_get_settings, 626 .get_settings = be_get_settings,
572 .get_drvinfo = be_get_drvinfo, 627 .get_drvinfo = be_get_drvinfo,
573 .get_wol = be_get_wol, 628 .get_wol = be_get_wol,
574 .set_wol = be_set_wol, 629 .set_wol = be_set_wol,
575 .get_link = ethtool_op_get_link, 630 .get_link = ethtool_op_get_link,
631 .get_eeprom_len = be_get_eeprom_len,
632 .get_eeprom = be_read_eeprom,
576 .get_coalesce = be_get_coalesce, 633 .get_coalesce = be_get_coalesce,
577 .set_coalesce = be_set_coalesce, 634 .set_coalesce = be_set_coalesce,
578 .get_ringparam = be_get_ringparam, 635 .get_ringparam = be_get_ringparam,
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index e2b3beffd49d..5ffb149181ad 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -99,6 +99,63 @@
99/* Number of entries posted */ 99/* Number of entries posted */
100#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */ 100#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
101 101
102/* Flashrom related descriptors */
103#define IMAGE_TYPE_FIRMWARE 160
104#define IMAGE_TYPE_BOOTCODE 224
105#define IMAGE_TYPE_OPTIONROM 32
106
107#define NUM_FLASHDIR_ENTRIES 32
108
109#define IMG_TYPE_ISCSI_ACTIVE 0
110#define IMG_TYPE_REDBOOT 1
111#define IMG_TYPE_BIOS 2
112#define IMG_TYPE_PXE_BIOS 3
113#define IMG_TYPE_FCOE_BIOS 8
114#define IMG_TYPE_ISCSI_BACKUP 9
115#define IMG_TYPE_FCOE_FW_ACTIVE 10
116#define IMG_TYPE_FCOE_FW_BACKUP 11
117#define IMG_TYPE_NCSI_BITFILE 13
118#define IMG_TYPE_NCSI_8051 14
119
120#define FLASHROM_OPER_FLASH 1
121#define FLASHROM_OPER_SAVE 2
122#define FLASHROM_OPER_REPORT 4
123
124#define FLASH_IMAGE_MAX_SIZE_g2 (1310720) /* Max firmware image sz */
125#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 (262144) /* Max OPTION ROM img sz */
126#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 (262144) /* Max Redboot image sz */
127#define FLASH_IMAGE_MAX_SIZE_g3 (2097152) /* Max fw image size */
128#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 (524288) /* Max OPTION ROM img sz */
129#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 (1048576) /* Max Redboot image sz */
130
131#define FLASH_NCSI_MAGIC (0x16032009)
132#define FLASH_NCSI_DISABLED (0)
133#define FLASH_NCSI_ENABLED (1)
134
135#define FLASH_NCSI_BITFILE_HDR_OFFSET (0x600000)
136
137/* Offsets for components on Flash. */
138#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 (1048576)
139#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 (2359296)
140#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 (3670016)
141#define FLASH_FCoE_BACKUP_IMAGE_START_g2 (4980736)
142#define FLASH_iSCSI_BIOS_START_g2 (7340032)
143#define FLASH_PXE_BIOS_START_g2 (7864320)
144#define FLASH_FCoE_BIOS_START_g2 (524288)
145#define FLASH_REDBOOT_START_g2 (0)
146
147#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 (2097152)
148#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 (4194304)
149#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 (6291456)
150#define FLASH_FCoE_BACKUP_IMAGE_START_g3 (8388608)
151#define FLASH_iSCSI_BIOS_START_g3 (12582912)
152#define FLASH_PXE_BIOS_START_g3 (13107200)
153#define FLASH_FCoE_BIOS_START_g3 (13631488)
154#define FLASH_REDBOOT_START_g3 (262144)
155
156
157
158
102/* 159/*
103 * BE descriptors: host memory data structures whose formats 160 * BE descriptors: host memory data structures whose formats
104 * are hardwired in BE silicon. 161 * are hardwired in BE silicon.
@@ -107,6 +164,7 @@
107#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */ 164#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
108#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */ 165#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
109#define EQ_ENTRY_RES_ID_SHIFT 16 166#define EQ_ENTRY_RES_ID_SHIFT 16
167
110struct be_eq_entry { 168struct be_eq_entry {
111 u32 evt; 169 u32 evt;
112}; 170};
@@ -221,41 +279,6 @@ struct be_eth_rx_compl {
221 u32 dw[4]; 279 u32 dw[4];
222}; 280};
223 281
224/* Flashrom related descriptors */
225#define IMAGE_TYPE_FIRMWARE 160
226#define IMAGE_TYPE_BOOTCODE 224
227#define IMAGE_TYPE_OPTIONROM 32
228
229#define NUM_FLASHDIR_ENTRIES 32
230
231#define FLASHROM_TYPE_ISCSI_ACTIVE 0
232#define FLASHROM_TYPE_REDBOOT 1
233#define FLASHROM_TYPE_BIOS 2
234#define FLASHROM_TYPE_PXE_BIOS 3
235#define FLASHROM_TYPE_FCOE_BIOS 8
236#define FLASHROM_TYPE_ISCSI_BACKUP 9
237#define FLASHROM_TYPE_FCOE_FW_ACTIVE 10
238#define FLASHROM_TYPE_FCOE_FW_BACKUP 11
239
240#define FLASHROM_OPER_FLASH 1
241#define FLASHROM_OPER_SAVE 2
242#define FLASHROM_OPER_REPORT 4
243
244#define FLASH_IMAGE_MAX_SIZE (1310720) /* Max firmware image size */
245#define FLASH_BIOS_IMAGE_MAX_SIZE (262144) /* Max OPTION ROM image sz */
246#define FLASH_REDBOOT_IMAGE_MAX_SIZE (262144) /* Max redboot image sz */
247
248/* Offsets for components on Flash. */
249#define FLASH_iSCSI_PRIMARY_IMAGE_START (1048576)
250#define FLASH_iSCSI_BACKUP_IMAGE_START (2359296)
251#define FLASH_FCoE_PRIMARY_IMAGE_START (3670016)
252#define FLASH_FCoE_BACKUP_IMAGE_START (4980736)
253#define FLASH_iSCSI_BIOS_START (7340032)
254#define FLASH_PXE_BIOS_START (7864320)
255#define FLASH_FCoE_BIOS_START (524288)
256#define FLASH_REDBOOT_START (32768)
257#define FLASH_REDBOOT_ISM_START (0)
258
259struct controller_id { 282struct controller_id {
260 u32 vendor; 283 u32 vendor;
261 u32 device; 284 u32 device;
@@ -263,7 +286,20 @@ struct controller_id {
263 u32 subdevice; 286 u32 subdevice;
264}; 287};
265 288
266struct flash_file_hdr { 289struct flash_comp {
290 unsigned long offset;
291 int optype;
292 int size;
293};
294
295struct image_hdr {
296 u32 imageid;
297 u32 imageoffset;
298 u32 imagelength;
299 u32 image_checksum;
300 u8 image_version[32];
301};
302struct flash_file_hdr_g2 {
267 u8 sign[32]; 303 u8 sign[32];
268 u32 cksum; 304 u32 cksum;
269 u32 antidote; 305 u32 antidote;
@@ -275,6 +311,17 @@ struct flash_file_hdr {
275 u8 build[24]; 311 u8 build[24];
276}; 312};
277 313
314struct flash_file_hdr_g3 {
315 u8 sign[52];
316 u8 ufi_version[4];
317 u32 file_len;
318 u32 cksum;
319 u32 antidote;
320 u32 num_imgs;
321 u8 build[24];
322 u8 rsvd[32];
323};
324
278struct flash_section_hdr { 325struct flash_section_hdr {
279 u32 format_rev; 326 u32 format_rev;
280 u32 cksum; 327 u32 cksum;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 626b76c0ebc7..a703ed8e24fe 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -34,7 +34,6 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
34 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, 34 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
35 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, 35 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
36 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, 36 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
37 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
38 { 0 } 37 { 0 }
39}; 38};
40MODULE_DEVICE_TABLE(pci, be_dev_ids); 39MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -69,6 +68,9 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
69 u32 reg = ioread32(addr); 68 u32 reg = ioread32(addr);
70 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 69 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
71 70
71 if (adapter->eeh_err)
72 return;
73
72 if (!enabled && enable) 74 if (!enabled && enable)
73 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 75 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
74 else if (enabled && !enable) 76 else if (enabled && !enable)
@@ -100,6 +102,10 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
100{ 102{
101 u32 val = 0; 103 u32 val = 0;
102 val |= qid & DB_EQ_RING_ID_MASK; 104 val |= qid & DB_EQ_RING_ID_MASK;
105
106 if (adapter->eeh_err)
107 return;
108
103 if (arm) 109 if (arm)
104 val |= 1 << DB_EQ_REARM_SHIFT; 110 val |= 1 << DB_EQ_REARM_SHIFT;
105 if (clear_int) 111 if (clear_int)
@@ -113,6 +119,10 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
113{ 119{
114 u32 val = 0; 120 u32 val = 0;
115 val |= qid & DB_CQ_RING_ID_MASK; 121 val |= qid & DB_CQ_RING_ID_MASK;
122
123 if (adapter->eeh_err)
124 return;
125
116 if (arm) 126 if (arm)
117 val |= 1 << DB_CQ_REARM_SHIFT; 127 val |= 1 << DB_CQ_REARM_SHIFT;
118 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT; 128 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
@@ -149,13 +159,10 @@ void netdev_stats_update(struct be_adapter *adapter)
149 struct net_device_stats *dev_stats = &adapter->netdev->stats; 159 struct net_device_stats *dev_stats = &adapter->netdev->stats;
150 struct be_erx_stats *erx_stats = &hw_stats->erx; 160 struct be_erx_stats *erx_stats = &hw_stats->erx;
151 161
152 dev_stats->rx_packets = port_stats->rx_total_frames; 162 dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts;
153 dev_stats->tx_packets = port_stats->tx_unicastframes + 163 dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
154 port_stats->tx_multicastframes + port_stats->tx_broadcastframes; 164 dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
155 dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 | 165 dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
156 (u64) port_stats->rx_bytes_lsd;
157 dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
158 (u64) port_stats->tx_bytes_lsd;
159 166
160 /* bad pkts received */ 167 /* bad pkts received */
161 dev_stats->rx_errors = port_stats->rx_crc_errors + 168 dev_stats->rx_errors = port_stats->rx_crc_errors +
@@ -312,12 +319,13 @@ static void be_tx_rate_update(struct be_adapter *adapter)
312} 319}
313 320
314static void be_tx_stats_update(struct be_adapter *adapter, 321static void be_tx_stats_update(struct be_adapter *adapter,
315 u32 wrb_cnt, u32 copied, bool stopped) 322 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
316{ 323{
317 struct be_drvr_stats *stats = drvr_stats(adapter); 324 struct be_drvr_stats *stats = drvr_stats(adapter);
318 stats->be_tx_reqs++; 325 stats->be_tx_reqs++;
319 stats->be_tx_wrbs += wrb_cnt; 326 stats->be_tx_wrbs += wrb_cnt;
320 stats->be_tx_bytes += copied; 327 stats->be_tx_bytes += copied;
328 stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
321 if (stopped) 329 if (stopped)
322 stats->be_tx_stops++; 330 stats->be_tx_stops++;
323} 331}
@@ -462,7 +470,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
462 470
463 be_txq_notify(adapter, txq->id, wrb_cnt); 471 be_txq_notify(adapter, txq->id, wrb_cnt);
464 472
465 be_tx_stats_update(adapter, wrb_cnt, copied, stopped); 473 be_tx_stats_update(adapter, wrb_cnt, copied,
474 skb_shinfo(skb)->gso_segs, stopped);
466 } else { 475 } else {
467 txq->head = start; 476 txq->head = start;
468 dev_kfree_skb_any(skb); 477 dev_kfree_skb_any(skb);
@@ -474,10 +483,12 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
474{ 483{
475 struct be_adapter *adapter = netdev_priv(netdev); 484 struct be_adapter *adapter = netdev_priv(netdev);
476 if (new_mtu < BE_MIN_MTU || 485 if (new_mtu < BE_MIN_MTU ||
477 new_mtu > BE_MAX_JUMBO_FRAME_SIZE) { 486 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
487 (ETH_HLEN + ETH_FCS_LEN))) {
478 dev_info(&adapter->pdev->dev, 488 dev_info(&adapter->pdev->dev,
479 "MTU must be between %d and %d bytes\n", 489 "MTU must be between %d and %d bytes\n",
480 BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE); 490 BE_MIN_MTU,
491 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
481 return -EINVAL; 492 return -EINVAL;
482 } 493 }
483 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n", 494 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
@@ -487,17 +498,16 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
487} 498}
488 499
489/* 500/*
490 * if there are BE_NUM_VLANS_SUPPORTED or lesser number of VLANS configured, 501 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
491 * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured, 502 * If the user configures more, place BE in vlan promiscuous mode.
492 * set the BE in promiscuous VLAN mode.
493 */ 503 */
494static int be_vid_config(struct be_adapter *adapter) 504static int be_vid_config(struct be_adapter *adapter)
495{ 505{
496 u16 vtag[BE_NUM_VLANS_SUPPORTED]; 506 u16 vtag[BE_NUM_VLANS_SUPPORTED];
497 u16 ntags = 0, i; 507 u16 ntags = 0, i;
498 int status; 508 int status = 0;
499 509
500 if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) { 510 if (adapter->vlans_added <= adapter->max_vlans) {
501 /* Construct VLAN Table to give to HW */ 511 /* Construct VLAN Table to give to HW */
502 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 512 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
503 if (adapter->vlan_tag[i]) { 513 if (adapter->vlan_tag[i]) {
@@ -531,21 +541,21 @@ static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
531{ 541{
532 struct be_adapter *adapter = netdev_priv(netdev); 542 struct be_adapter *adapter = netdev_priv(netdev);
533 543
534 adapter->num_vlans++;
535 adapter->vlan_tag[vid] = 1; 544 adapter->vlan_tag[vid] = 1;
536 545 adapter->vlans_added++;
537 be_vid_config(adapter); 546 if (adapter->vlans_added <= (adapter->max_vlans + 1))
547 be_vid_config(adapter);
538} 548}
539 549
540static void be_vlan_rem_vid(struct net_device *netdev, u16 vid) 550static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
541{ 551{
542 struct be_adapter *adapter = netdev_priv(netdev); 552 struct be_adapter *adapter = netdev_priv(netdev);
543 553
544 adapter->num_vlans--;
545 adapter->vlan_tag[vid] = 0; 554 adapter->vlan_tag[vid] = 0;
546
547 vlan_group_set_device(adapter->vlan_grp, vid, NULL); 555 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
548 be_vid_config(adapter); 556 adapter->vlans_added--;
557 if (adapter->vlans_added <= adapter->max_vlans)
558 be_vid_config(adapter);
549} 559}
550 560
551static void be_set_multicast_list(struct net_device *netdev) 561static void be_set_multicast_list(struct net_device *netdev)
@@ -565,14 +575,15 @@ static void be_set_multicast_list(struct net_device *netdev)
565 } 575 }
566 576
567 /* Enable multicast promisc if num configured exceeds what we support */ 577 /* Enable multicast promisc if num configured exceeds what we support */
568 if (netdev->flags & IFF_ALLMULTI || netdev->mc_count > BE_MAX_MC) { 578 if (netdev->flags & IFF_ALLMULTI ||
569 be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0, 579 netdev_mc_count(netdev) > BE_MAX_MC) {
580 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
570 &adapter->mc_cmd_mem); 581 &adapter->mc_cmd_mem);
571 goto done; 582 goto done;
572 } 583 }
573 584
574 be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list, 585 be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
575 netdev->mc_count, &adapter->mc_cmd_mem); 586 &adapter->mc_cmd_mem);
576done: 587done:
577 return; 588 return;
578} 589}
@@ -607,6 +618,7 @@ static void be_rx_stats_update(struct be_adapter *adapter,
607 stats->be_rx_compl++; 618 stats->be_rx_compl++;
608 stats->be_rx_frags += numfrags; 619 stats->be_rx_frags += numfrags;
609 stats->be_rx_bytes += pktsize; 620 stats->be_rx_bytes += pktsize;
621 stats->be_rx_pkts++;
610} 622}
611 623
612static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso) 624static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
@@ -634,9 +646,11 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
634 rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx]; 646 rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
635 BUG_ON(!rx_page_info->page); 647 BUG_ON(!rx_page_info->page);
636 648
637 if (rx_page_info->last_page_user) 649 if (rx_page_info->last_page_user) {
638 pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus), 650 pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
639 adapter->big_page_size, PCI_DMA_FROMDEVICE); 651 adapter->big_page_size, PCI_DMA_FROMDEVICE);
652 rx_page_info->last_page_user = false;
653 }
640 654
641 atomic_dec(&rxq->used); 655 atomic_dec(&rxq->used);
642 return rx_page_info; 656 return rx_page_info;
@@ -666,17 +680,17 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
666 * indicated by rxcp. 680 * indicated by rxcp.
667 */ 681 */
668static void skb_fill_rx_data(struct be_adapter *adapter, 682static void skb_fill_rx_data(struct be_adapter *adapter,
669 struct sk_buff *skb, struct be_eth_rx_compl *rxcp) 683 struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
684 u16 num_rcvd)
670{ 685{
671 struct be_queue_info *rxq = &adapter->rx_obj.q; 686 struct be_queue_info *rxq = &adapter->rx_obj.q;
672 struct be_rx_page_info *page_info; 687 struct be_rx_page_info *page_info;
673 u16 rxq_idx, i, num_rcvd, j; 688 u16 rxq_idx, i, j;
674 u32 pktsize, hdr_len, curr_frag_len, size; 689 u32 pktsize, hdr_len, curr_frag_len, size;
675 u8 *start; 690 u8 *start;
676 691
677 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 692 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
678 pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); 693 pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
679 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
680 694
681 page_info = get_rx_page_info(adapter, rxq_idx); 695 page_info = get_rx_page_info(adapter, rxq_idx);
682 696
@@ -704,7 +718,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
704 skb->data_len = curr_frag_len - hdr_len; 718 skb->data_len = curr_frag_len - hdr_len;
705 skb->tail += hdr_len; 719 skb->tail += hdr_len;
706 } 720 }
707 memset(page_info, 0, sizeof(*page_info)); 721 page_info->page = NULL;
708 722
709 if (pktsize <= rx_frag_size) { 723 if (pktsize <= rx_frag_size) {
710 BUG_ON(num_rcvd != 1); 724 BUG_ON(num_rcvd != 1);
@@ -737,7 +751,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
737 skb->len += curr_frag_len; 751 skb->len += curr_frag_len;
738 skb->data_len += curr_frag_len; 752 skb->data_len += curr_frag_len;
739 753
740 memset(page_info, 0, sizeof(*page_info)); 754 page_info->page = NULL;
741 } 755 }
742 BUG_ON(j > MAX_SKB_FRAGS); 756 BUG_ON(j > MAX_SKB_FRAGS);
743 757
@@ -752,25 +766,23 @@ static void be_rx_compl_process(struct be_adapter *adapter,
752{ 766{
753 struct sk_buff *skb; 767 struct sk_buff *skb;
754 u32 vlanf, vid; 768 u32 vlanf, vid;
769 u16 num_rcvd;
755 u8 vtm; 770 u8 vtm;
756 771
757 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); 772 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
758 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); 773 /* Is it a flush compl that has no data */
759 774 if (unlikely(num_rcvd == 0))
760 /* vlanf could be wrongly set in some cards. 775 return;
761 * ignore if vtm is not set */
762 if ((adapter->cap & 0x400) && !vtm)
763 vlanf = 0;
764 776
765 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN); 777 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
766 if (!skb) { 778 if (unlikely(!skb)) {
767 if (net_ratelimit()) 779 if (net_ratelimit())
768 dev_warn(&adapter->pdev->dev, "skb alloc failed\n"); 780 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
769 be_rx_compl_discard(adapter, rxcp); 781 be_rx_compl_discard(adapter, rxcp);
770 return; 782 return;
771 } 783 }
772 784
773 skb_fill_rx_data(adapter, skb, rxcp); 785 skb_fill_rx_data(adapter, skb, rxcp, num_rcvd);
774 786
775 if (do_pkt_csum(rxcp, adapter->rx_csum)) 787 if (do_pkt_csum(rxcp, adapter->rx_csum))
776 skb->ip_summed = CHECKSUM_NONE; 788 skb->ip_summed = CHECKSUM_NONE;
@@ -781,8 +793,16 @@ static void be_rx_compl_process(struct be_adapter *adapter,
781 skb->protocol = eth_type_trans(skb, adapter->netdev); 793 skb->protocol = eth_type_trans(skb, adapter->netdev);
782 skb->dev = adapter->netdev; 794 skb->dev = adapter->netdev;
783 795
784 if (vlanf) { 796 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
785 if (!adapter->vlan_grp || adapter->num_vlans == 0) { 797 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
798
799 /* vlanf could be wrongly set in some cards.
800 * ignore if vtm is not set */
801 if ((adapter->cap & 0x400) && !vtm)
802 vlanf = 0;
803
804 if (unlikely(vlanf)) {
805 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
786 kfree_skb(skb); 806 kfree_skb(skb);
787 return; 807 return;
788 } 808 }
@@ -809,6 +829,10 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
809 u8 vtm; 829 u8 vtm;
810 830
811 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 831 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
832 /* Is it a flush compl that has no data */
833 if (unlikely(num_rcvd == 0))
834 return;
835
812 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); 836 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
813 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); 837 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
814 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 838 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
@@ -862,7 +886,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
862 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); 886 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
863 vid = be16_to_cpu(vid); 887 vid = be16_to_cpu(vid);
864 888
865 if (!adapter->vlan_grp || adapter->num_vlans == 0) 889 if (!adapter->vlan_grp || adapter->vlans_added == 0)
866 return; 890 return;
867 891
868 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid); 892 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
@@ -1104,6 +1128,9 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
1104 struct be_queue_info *txq = &adapter->tx_obj.q; 1128 struct be_queue_info *txq = &adapter->tx_obj.q;
1105 struct be_eth_tx_compl *txcp; 1129 struct be_eth_tx_compl *txcp;
1106 u16 end_idx, cmpl = 0, timeo = 0; 1130 u16 end_idx, cmpl = 0, timeo = 0;
1131 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1132 struct sk_buff *sent_skb;
1133 bool dummy_wrb;
1107 1134
1108 /* Wait for a max of 200ms for all the tx-completions to arrive. */ 1135 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1109 do { 1136 do {
@@ -1127,6 +1154,15 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
1127 if (atomic_read(&txq->used)) 1154 if (atomic_read(&txq->used))
1128 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n", 1155 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1129 atomic_read(&txq->used)); 1156 atomic_read(&txq->used));
1157
1158 /* free posted tx for which compls will never arrive */
1159 while (atomic_read(&txq->used)) {
1160 sent_skb = sent_skbs[txq->tail];
1161 end_idx = txq->tail;
1162 index_adv(&end_idx,
1163 wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
1164 be_tx_compl_process(adapter, end_idx);
1165 }
1130} 1166}
1131 1167
1132static void be_mcc_queues_destroy(struct be_adapter *adapter) 1168static void be_mcc_queues_destroy(struct be_adapter *adapter)
@@ -1259,6 +1295,11 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
1259 q = &adapter->rx_obj.q; 1295 q = &adapter->rx_obj.q;
1260 if (q->created) { 1296 if (q->created) {
1261 be_cmd_q_destroy(adapter, q, QTYPE_RXQ); 1297 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1298
1299 /* After the rxq is invalidated, wait for a grace time
1300 * of 1ms for all dma to end and the flush compl to arrive
1301 */
1302 mdelay(1);
1262 be_rx_q_clean(adapter); 1303 be_rx_q_clean(adapter);
1263 } 1304 }
1264 be_queue_free(adapter, q); 1305 be_queue_free(adapter, q);
@@ -1428,23 +1469,38 @@ int be_poll_rx(struct napi_struct *napi, int budget)
1428 return work_done; 1469 return work_done;
1429} 1470}
1430 1471
1431void be_process_tx(struct be_adapter *adapter) 1472/* As TX and MCC share the same EQ check for both TX and MCC completions.
1473 * For TX/MCC we don't honour budget; consume everything
1474 */
1475static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1432{ 1476{
1477 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1478 struct be_adapter *adapter =
1479 container_of(tx_eq, struct be_adapter, tx_eq);
1433 struct be_queue_info *txq = &adapter->tx_obj.q; 1480 struct be_queue_info *txq = &adapter->tx_obj.q;
1434 struct be_queue_info *tx_cq = &adapter->tx_obj.cq; 1481 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1435 struct be_eth_tx_compl *txcp; 1482 struct be_eth_tx_compl *txcp;
1436 u32 num_cmpl = 0; 1483 int tx_compl = 0, mcc_compl, status = 0;
1437 u16 end_idx; 1484 u16 end_idx;
1438 1485
1439 while ((txcp = be_tx_compl_get(tx_cq))) { 1486 while ((txcp = be_tx_compl_get(tx_cq))) {
1440 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl, 1487 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1441 wrb_index, txcp); 1488 wrb_index, txcp);
1442 be_tx_compl_process(adapter, end_idx); 1489 be_tx_compl_process(adapter, end_idx);
1443 num_cmpl++; 1490 tx_compl++;
1444 } 1491 }
1445 1492
1446 if (num_cmpl) { 1493 mcc_compl = be_process_mcc(adapter, &status);
1447 be_cq_notify(adapter, tx_cq->id, true, num_cmpl); 1494
1495 napi_complete(napi);
1496
1497 if (mcc_compl) {
1498 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1499 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1500 }
1501
1502 if (tx_compl) {
1503 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1448 1504
1449 /* As Tx wrbs have been freed up, wake up netdev queue if 1505 /* As Tx wrbs have been freed up, wake up netdev queue if
1450 * it was stopped due to lack of tx wrbs. 1506 * it was stopped due to lack of tx wrbs.
@@ -1455,24 +1511,8 @@ void be_process_tx(struct be_adapter *adapter)
1455 } 1511 }
1456 1512
1457 drvr_stats(adapter)->be_tx_events++; 1513 drvr_stats(adapter)->be_tx_events++;
1458 drvr_stats(adapter)->be_tx_compl += num_cmpl; 1514 drvr_stats(adapter)->be_tx_compl += tx_compl;
1459 } 1515 }
1460}
1461
1462/* As TX and MCC share the same EQ check for both TX and MCC completions.
1463 * For TX/MCC we don't honour budget; consume everything
1464 */
1465static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1466{
1467 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1468 struct be_adapter *adapter =
1469 container_of(tx_eq, struct be_adapter, tx_eq);
1470
1471 napi_complete(napi);
1472
1473 be_process_tx(adapter);
1474
1475 be_process_mcc(adapter);
1476 1516
1477 return 1; 1517 return 1;
1478} 1518}
@@ -1641,6 +1681,9 @@ static int be_open(struct net_device *netdev)
1641 /* Rx compl queue may be in unarmed state; rearm it */ 1681 /* Rx compl queue may be in unarmed state; rearm it */
1642 be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0); 1682 be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
1643 1683
1684 /* Now that interrupts are on we can process async mcc */
1685 be_async_mcc_enable(adapter);
1686
1644 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed, 1687 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
1645 &link_speed); 1688 &link_speed);
1646 if (status) 1689 if (status)
@@ -1766,6 +1809,8 @@ static int be_close(struct net_device *netdev)
1766 1809
1767 cancel_delayed_work_sync(&adapter->work); 1810 cancel_delayed_work_sync(&adapter->work);
1768 1811
1812 be_async_mcc_disable(adapter);
1813
1769 netif_stop_queue(netdev); 1814 netif_stop_queue(netdev);
1770 netif_carrier_off(netdev); 1815 netif_carrier_off(netdev);
1771 adapter->link_up = false; 1816 adapter->link_up = false;
@@ -1798,15 +1843,19 @@ char flash_cookie[2][16] = {"*** SE FLAS",
1798 "H DIRECTORY *** "}; 1843 "H DIRECTORY *** "};
1799 1844
1800static bool be_flash_redboot(struct be_adapter *adapter, 1845static bool be_flash_redboot(struct be_adapter *adapter,
1801 const u8 *p) 1846 const u8 *p, u32 img_start, int image_size,
1847 int hdr_size)
1802{ 1848{
1803 u32 crc_offset; 1849 u32 crc_offset;
1804 u8 flashed_crc[4]; 1850 u8 flashed_crc[4];
1805 int status; 1851 int status;
1806 crc_offset = FLASH_REDBOOT_START + FLASH_REDBOOT_IMAGE_MAX_SIZE - 4 1852
1807 + sizeof(struct flash_file_hdr) - 32*1024; 1853 crc_offset = hdr_size + img_start + image_size - 4;
1854
1808 p += crc_offset; 1855 p += crc_offset;
1809 status = be_cmd_get_flash_crc(adapter, flashed_crc); 1856
1857 status = be_cmd_get_flash_crc(adapter, flashed_crc,
1858 (img_start + image_size - 4));
1810 if (status) { 1859 if (status) {
1811 dev_err(&adapter->pdev->dev, 1860 dev_err(&adapter->pdev->dev,
1812 "could not get crc from flash, not flashing redboot\n"); 1861 "could not get crc from flash, not flashing redboot\n");
@@ -1818,102 +1867,124 @@ static bool be_flash_redboot(struct be_adapter *adapter,
1818 return false; 1867 return false;
1819 else 1868 else
1820 return true; 1869 return true;
1821
1822} 1870}
1823 1871
1824static int be_flash_image(struct be_adapter *adapter, 1872static int be_flash_data(struct be_adapter *adapter,
1825 const struct firmware *fw, 1873 const struct firmware *fw,
1826 struct be_dma_mem *flash_cmd, u32 flash_type) 1874 struct be_dma_mem *flash_cmd, int num_of_images)
1875
1827{ 1876{
1828 int status; 1877 int status = 0, i, filehdr_size = 0;
1829 u32 flash_op, image_offset = 0, total_bytes, image_size = 0; 1878 u32 total_bytes = 0, flash_op;
1830 int num_bytes; 1879 int num_bytes;
1831 const u8 *p = fw->data; 1880 const u8 *p = fw->data;
1832 struct be_cmd_write_flashrom *req = flash_cmd->va; 1881 struct be_cmd_write_flashrom *req = flash_cmd->va;
1833 1882 struct flash_comp *pflashcomp;
1834 switch (flash_type) { 1883
1835 case FLASHROM_TYPE_ISCSI_ACTIVE: 1884 struct flash_comp gen3_flash_types[8] = {
1836 image_offset = FLASH_iSCSI_PRIMARY_IMAGE_START; 1885 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
1837 image_size = FLASH_IMAGE_MAX_SIZE; 1886 FLASH_IMAGE_MAX_SIZE_g3},
1838 break; 1887 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
1839 case FLASHROM_TYPE_ISCSI_BACKUP: 1888 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
1840 image_offset = FLASH_iSCSI_BACKUP_IMAGE_START; 1889 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
1841 image_size = FLASH_IMAGE_MAX_SIZE; 1890 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
1842 break; 1891 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
1843 case FLASHROM_TYPE_FCOE_FW_ACTIVE: 1892 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
1844 image_offset = FLASH_FCoE_PRIMARY_IMAGE_START; 1893 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
1845 image_size = FLASH_IMAGE_MAX_SIZE; 1894 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
1846 break; 1895 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
1847 case FLASHROM_TYPE_FCOE_FW_BACKUP: 1896 FLASH_IMAGE_MAX_SIZE_g3},
1848 image_offset = FLASH_FCoE_BACKUP_IMAGE_START; 1897 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
1849 image_size = FLASH_IMAGE_MAX_SIZE; 1898 FLASH_IMAGE_MAX_SIZE_g3},
1850 break; 1899 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
1851 case FLASHROM_TYPE_BIOS: 1900 FLASH_IMAGE_MAX_SIZE_g3}
1852 image_offset = FLASH_iSCSI_BIOS_START; 1901 };
1853 image_size = FLASH_BIOS_IMAGE_MAX_SIZE; 1902 struct flash_comp gen2_flash_types[8] = {
1854 break; 1903 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
1855 case FLASHROM_TYPE_FCOE_BIOS: 1904 FLASH_IMAGE_MAX_SIZE_g2},
1856 image_offset = FLASH_FCoE_BIOS_START; 1905 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
1857 image_size = FLASH_BIOS_IMAGE_MAX_SIZE; 1906 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
1858 break; 1907 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
1859 case FLASHROM_TYPE_PXE_BIOS: 1908 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
1860 image_offset = FLASH_PXE_BIOS_START; 1909 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
1861 image_size = FLASH_BIOS_IMAGE_MAX_SIZE; 1910 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
1862 break; 1911 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
1863 case FLASHROM_TYPE_REDBOOT: 1912 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
1864 if (!be_flash_redboot(adapter, fw->data)) 1913 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
1865 return 0; 1914 FLASH_IMAGE_MAX_SIZE_g2},
1866 image_offset = FLASH_REDBOOT_ISM_START; 1915 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
1867 image_size = FLASH_REDBOOT_IMAGE_MAX_SIZE; 1916 FLASH_IMAGE_MAX_SIZE_g2},
1868 break; 1917 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
1869 default: 1918 FLASH_IMAGE_MAX_SIZE_g2}
1870 return 0; 1919 };
1920
1921 if (adapter->generation == BE_GEN3) {
1922 pflashcomp = gen3_flash_types;
1923 filehdr_size = sizeof(struct flash_file_hdr_g3);
1924 } else {
1925 pflashcomp = gen2_flash_types;
1926 filehdr_size = sizeof(struct flash_file_hdr_g2);
1871 } 1927 }
1872 1928 for (i = 0; i < 8; i++) {
1873 p += sizeof(struct flash_file_hdr) + image_offset; 1929 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
1874 if (p + image_size > fw->data + fw->size) 1930 (!be_flash_redboot(adapter, fw->data,
1931 pflashcomp[i].offset, pflashcomp[i].size,
1932 filehdr_size)))
1933 continue;
1934 p = fw->data;
1935 p += filehdr_size + pflashcomp[i].offset
1936 + (num_of_images * sizeof(struct image_hdr));
1937 if (p + pflashcomp[i].size > fw->data + fw->size)
1875 return -1; 1938 return -1;
1876 1939 total_bytes = pflashcomp[i].size;
1877 total_bytes = image_size; 1940 while (total_bytes) {
1878 1941 if (total_bytes > 32*1024)
1879 while (total_bytes) { 1942 num_bytes = 32*1024;
1880 if (total_bytes > 32*1024) 1943 else
1881 num_bytes = 32*1024; 1944 num_bytes = total_bytes;
1882 else 1945 total_bytes -= num_bytes;
1883 num_bytes = total_bytes; 1946
1884 total_bytes -= num_bytes; 1947 if (!total_bytes)
1885 1948 flash_op = FLASHROM_OPER_FLASH;
1886 if (!total_bytes) 1949 else
1887 flash_op = FLASHROM_OPER_FLASH; 1950 flash_op = FLASHROM_OPER_SAVE;
1888 else 1951 memcpy(req->params.data_buf, p, num_bytes);
1889 flash_op = FLASHROM_OPER_SAVE; 1952 p += num_bytes;
1890 memcpy(req->params.data_buf, p, num_bytes); 1953 status = be_cmd_write_flashrom(adapter, flash_cmd,
1891 p += num_bytes; 1954 pflashcomp[i].optype, flash_op, num_bytes);
1892 status = be_cmd_write_flashrom(adapter, flash_cmd, 1955 if (status) {
1893 flash_type, flash_op, num_bytes); 1956 dev_err(&adapter->pdev->dev,
1894 if (status) { 1957 "cmd to write to flash rom failed.\n");
1895 dev_err(&adapter->pdev->dev, 1958 return -1;
1896 "cmd to write to flash rom failed. type/op %d/%d\n", 1959 }
1897 flash_type, flash_op); 1960 yield();
1898 return -1;
1899 } 1961 }
1900 yield();
1901 } 1962 }
1902
1903 return 0; 1963 return 0;
1904} 1964}
1905 1965
1966static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
1967{
1968 if (fhdr == NULL)
1969 return 0;
1970 if (fhdr->build[0] == '3')
1971 return BE_GEN3;
1972 else if (fhdr->build[0] == '2')
1973 return BE_GEN2;
1974 else
1975 return 0;
1976}
1977
1906int be_load_fw(struct be_adapter *adapter, u8 *func) 1978int be_load_fw(struct be_adapter *adapter, u8 *func)
1907{ 1979{
1908 char fw_file[ETHTOOL_FLASH_MAX_FILENAME]; 1980 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
1909 const struct firmware *fw; 1981 const struct firmware *fw;
1910 struct flash_file_hdr *fhdr; 1982 struct flash_file_hdr_g2 *fhdr;
1911 struct flash_section_info *fsec = NULL; 1983 struct flash_file_hdr_g3 *fhdr3;
1984 struct image_hdr *img_hdr_ptr = NULL;
1912 struct be_dma_mem flash_cmd; 1985 struct be_dma_mem flash_cmd;
1913 int status; 1986 int status, i = 0;
1914 const u8 *p; 1987 const u8 *p;
1915 bool entry_found = false;
1916 int flash_type;
1917 char fw_ver[FW_VER_LEN]; 1988 char fw_ver[FW_VER_LEN];
1918 char fw_cfg; 1989 char fw_cfg;
1919 1990
@@ -1931,34 +2002,9 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
1931 goto fw_exit; 2002 goto fw_exit;
1932 2003
1933 p = fw->data; 2004 p = fw->data;
1934 fhdr = (struct flash_file_hdr *) p; 2005 fhdr = (struct flash_file_hdr_g2 *) p;
1935 if (memcmp(fhdr->sign, FW_FILE_HDR_SIGN, strlen(FW_FILE_HDR_SIGN))) {
1936 dev_err(&adapter->pdev->dev,
1937 "Firmware(%s) load error (signature did not match)\n",
1938 fw_file);
1939 status = -1;
1940 goto fw_exit;
1941 }
1942
1943 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file); 2006 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
1944 2007
1945 p += sizeof(struct flash_file_hdr);
1946 while (p < (fw->data + fw->size)) {
1947 fsec = (struct flash_section_info *)p;
1948 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) {
1949 entry_found = true;
1950 break;
1951 }
1952 p += 32;
1953 }
1954
1955 if (!entry_found) {
1956 status = -1;
1957 dev_err(&adapter->pdev->dev,
1958 "Flash cookie not found in firmware image\n");
1959 goto fw_exit;
1960 }
1961
1962 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024; 2008 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
1963 flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size, 2009 flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
1964 &flash_cmd.dma); 2010 &flash_cmd.dma);
@@ -1969,12 +2015,26 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
1969 goto fw_exit; 2015 goto fw_exit;
1970 } 2016 }
1971 2017
1972 for (flash_type = FLASHROM_TYPE_ISCSI_ACTIVE; 2018 if ((adapter->generation == BE_GEN3) &&
1973 flash_type <= FLASHROM_TYPE_FCOE_FW_BACKUP; flash_type++) { 2019 (get_ufigen_type(fhdr) == BE_GEN3)) {
1974 status = be_flash_image(adapter, fw, &flash_cmd, 2020 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
1975 flash_type); 2021 for (i = 0; i < fhdr3->num_imgs; i++) {
1976 if (status) 2022 img_hdr_ptr = (struct image_hdr *) (fw->data +
1977 break; 2023 (sizeof(struct flash_file_hdr_g3) +
2024 i * sizeof(struct image_hdr)));
2025 if (img_hdr_ptr->imageid == 1) {
2026 status = be_flash_data(adapter, fw,
2027 &flash_cmd, fhdr3->num_imgs);
2028 }
2029
2030 }
2031 } else if ((adapter->generation == BE_GEN2) &&
2032 (get_ufigen_type(fhdr) == BE_GEN2)) {
2033 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2034 } else {
2035 dev_err(&adapter->pdev->dev,
2036 "UFI and Interface are not compatible for flashing\n");
2037 status = -1;
1978 } 2038 }
1979 2039
1980 pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va, 2040 pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
@@ -2136,6 +2196,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
2136 spin_lock_init(&adapter->mcc_lock); 2196 spin_lock_init(&adapter->mcc_lock);
2137 spin_lock_init(&adapter->mcc_cq_lock); 2197 spin_lock_init(&adapter->mcc_cq_lock);
2138 2198
2199 pci_save_state(adapter->pdev);
2139 return 0; 2200 return 0;
2140 2201
2141free_mbox: 2202free_mbox:
@@ -2222,6 +2283,11 @@ static int be_get_config(struct be_adapter *adapter)
2222 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); 2283 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2223 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); 2284 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2224 2285
2286 if (adapter->cap & 0x400)
2287 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2288 else
2289 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2290
2225 return 0; 2291 return 0;
2226} 2292}
2227 2293
@@ -2394,13 +2460,123 @@ static int be_resume(struct pci_dev *pdev)
2394 return 0; 2460 return 0;
2395} 2461}
2396 2462
2463/*
2464 * An FLR will stop BE from DMAing any data.
2465 */
2466static void be_shutdown(struct pci_dev *pdev)
2467{
2468 struct be_adapter *adapter = pci_get_drvdata(pdev);
2469 struct net_device *netdev = adapter->netdev;
2470
2471 netif_device_detach(netdev);
2472
2473 be_cmd_reset_function(adapter);
2474
2475 if (adapter->wol)
2476 be_setup_wol(adapter, true);
2477
2478 pci_disable_device(pdev);
2479
2480 return;
2481}
2482
2483static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
2484 pci_channel_state_t state)
2485{
2486 struct be_adapter *adapter = pci_get_drvdata(pdev);
2487 struct net_device *netdev = adapter->netdev;
2488
2489 dev_err(&adapter->pdev->dev, "EEH error detected\n");
2490
2491 adapter->eeh_err = true;
2492
2493 netif_device_detach(netdev);
2494
2495 if (netif_running(netdev)) {
2496 rtnl_lock();
2497 be_close(netdev);
2498 rtnl_unlock();
2499 }
2500 be_clear(adapter);
2501
2502 if (state == pci_channel_io_perm_failure)
2503 return PCI_ERS_RESULT_DISCONNECT;
2504
2505 pci_disable_device(pdev);
2506
2507 return PCI_ERS_RESULT_NEED_RESET;
2508}
2509
2510static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
2511{
2512 struct be_adapter *adapter = pci_get_drvdata(pdev);
2513 int status;
2514
2515 dev_info(&adapter->pdev->dev, "EEH reset\n");
2516 adapter->eeh_err = false;
2517
2518 status = pci_enable_device(pdev);
2519 if (status)
2520 return PCI_ERS_RESULT_DISCONNECT;
2521
2522 pci_set_master(pdev);
2523 pci_set_power_state(pdev, 0);
2524 pci_restore_state(pdev);
2525
2526 /* Check if card is ok and fw is ready */
2527 status = be_cmd_POST(adapter);
2528 if (status)
2529 return PCI_ERS_RESULT_DISCONNECT;
2530
2531 return PCI_ERS_RESULT_RECOVERED;
2532}
2533
2534static void be_eeh_resume(struct pci_dev *pdev)
2535{
2536 int status = 0;
2537 struct be_adapter *adapter = pci_get_drvdata(pdev);
2538 struct net_device *netdev = adapter->netdev;
2539
2540 dev_info(&adapter->pdev->dev, "EEH resume\n");
2541
2542 pci_save_state(pdev);
2543
2544 /* tell fw we're ready to fire cmds */
2545 status = be_cmd_fw_init(adapter);
2546 if (status)
2547 goto err;
2548
2549 status = be_setup(adapter);
2550 if (status)
2551 goto err;
2552
2553 if (netif_running(netdev)) {
2554 status = be_open(netdev);
2555 if (status)
2556 goto err;
2557 }
2558 netif_device_attach(netdev);
2559 return;
2560err:
2561 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
2562 return;
2563}
2564
2565static struct pci_error_handlers be_eeh_handlers = {
2566 .error_detected = be_eeh_err_detected,
2567 .slot_reset = be_eeh_reset,
2568 .resume = be_eeh_resume,
2569};
2570
2397static struct pci_driver be_driver = { 2571static struct pci_driver be_driver = {
2398 .name = DRV_NAME, 2572 .name = DRV_NAME,
2399 .id_table = be_dev_ids, 2573 .id_table = be_dev_ids,
2400 .probe = be_probe, 2574 .probe = be_probe,
2401 .remove = be_remove, 2575 .remove = be_remove,
2402 .suspend = be_suspend, 2576 .suspend = be_suspend,
2403 .resume = be_resume 2577 .resume = be_resume,
2578 .shutdown = be_shutdown,
2579 .err_handler = &be_eeh_handlers
2404}; 2580};
2405 2581
2406static int __init be_init_module(void) 2582static int __init be_init_module(void)
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 0b23bc4f56c6..587f93cf03f6 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -812,16 +812,14 @@ static void bfin_mac_timeout(struct net_device *dev)
812static void bfin_mac_multicast_hash(struct net_device *dev) 812static void bfin_mac_multicast_hash(struct net_device *dev)
813{ 813{
814 u32 emac_hashhi, emac_hashlo; 814 u32 emac_hashhi, emac_hashlo;
815 struct dev_mc_list *dmi = dev->mc_list; 815 struct dev_mc_list *dmi;
816 char *addrs; 816 char *addrs;
817 int i;
818 u32 crc; 817 u32 crc;
819 818
820 emac_hashhi = emac_hashlo = 0; 819 emac_hashhi = emac_hashlo = 0;
821 820
822 for (i = 0; i < dev->mc_count; i++) { 821 netdev_for_each_mc_addr(dmi, dev) {
823 addrs = dmi->dmi_addr; 822 addrs = dmi->dmi_addr;
824 dmi = dmi->next;
825 823
826 /* skip non-multicast addresses */ 824 /* skip non-multicast addresses */
827 if (!(*addrs & 1)) 825 if (!(*addrs & 1))
@@ -862,7 +860,7 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
862 sysctl = bfin_read_EMAC_OPMODE(); 860 sysctl = bfin_read_EMAC_OPMODE();
863 sysctl |= PAM; 861 sysctl |= PAM;
864 bfin_write_EMAC_OPMODE(sysctl); 862 bfin_write_EMAC_OPMODE(sysctl);
865 } else if (dev->mc_count) { 863 } else if (!netdev_mc_empty(dev)) {
866 /* set up multicast hash table */ 864 /* set up multicast hash table */
867 sysctl = bfin_read_EMAC_OPMODE(); 865 sysctl = bfin_read_EMAC_OPMODE();
868 sysctl |= HM; 866 sysctl |= HM;
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 9b587c344194..119468e76323 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -973,7 +973,7 @@ static void bmac_set_multicast(struct net_device *dev)
973{ 973{
974 struct dev_mc_list *dmi; 974 struct dev_mc_list *dmi;
975 struct bmac_data *bp = netdev_priv(dev); 975 struct bmac_data *bp = netdev_priv(dev);
976 int num_addrs = dev->mc_count; 976 int num_addrs = netdev_mc_count(dev);
977 unsigned short rx_cfg; 977 unsigned short rx_cfg;
978 int i; 978 int i;
979 979
@@ -982,7 +982,7 @@ static void bmac_set_multicast(struct net_device *dev)
982 982
983 XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs)); 983 XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
984 984
985 if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) { 985 if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
986 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff; 986 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
987 bmac_update_hash_table_mask(dev, bp); 987 bmac_update_hash_table_mask(dev, bp);
988 rx_cfg = bmac_rx_on(dev, 1, 0); 988 rx_cfg = bmac_rx_on(dev, 1, 0);
@@ -1000,7 +1000,7 @@ static void bmac_set_multicast(struct net_device *dev)
1000 rx_cfg = bmac_rx_on(dev, 0, 0); 1000 rx_cfg = bmac_rx_on(dev, 0, 0);
1001 XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg)); 1001 XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
1002 } else { 1002 } else {
1003 for (dmi=dev->mc_list; dmi!=NULL; dmi=dmi->next) 1003 netdev_for_each_mc_addr(dmi, dev)
1004 bmac_addhash(bp, dmi->dmi_addr); 1004 bmac_addhash(bp, dmi->dmi_addr);
1005 bmac_update_hash_table_mask(dev, bp); 1005 bmac_update_hash_table_mask(dev, bp);
1006 rx_cfg = bmac_rx_on(dev, 1, 0); 1006 rx_cfg = bmac_rx_on(dev, 1, 0);
@@ -1015,13 +1015,13 @@ static void bmac_set_multicast(struct net_device *dev)
1015 1015
1016static void bmac_set_multicast(struct net_device *dev) 1016static void bmac_set_multicast(struct net_device *dev)
1017{ 1017{
1018 struct dev_mc_list *dmi = dev->mc_list; 1018 struct dev_mc_list *dmi;
1019 char *addrs; 1019 char *addrs;
1020 int i; 1020 int i;
1021 unsigned short rx_cfg; 1021 unsigned short rx_cfg;
1022 u32 crc; 1022 u32 crc;
1023 1023
1024 if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) { 1024 if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
1025 bmwrite(dev, BHASH0, 0xffff); 1025 bmwrite(dev, BHASH0, 0xffff);
1026 bmwrite(dev, BHASH1, 0xffff); 1026 bmwrite(dev, BHASH1, 0xffff);
1027 bmwrite(dev, BHASH2, 0xffff); 1027 bmwrite(dev, BHASH2, 0xffff);
@@ -1039,9 +1039,8 @@ static void bmac_set_multicast(struct net_device *dev)
1039 1039
1040 for(i = 0; i < 4; i++) hash_table[i] = 0; 1040 for(i = 0; i < 4; i++) hash_table[i] = 0;
1041 1041
1042 for(i = 0; i < dev->mc_count; i++) { 1042 netdev_for_each_mc_addr(dmi, dev) {
1043 addrs = dmi->dmi_addr; 1043 addrs = dmi->dmi_addr;
1044 dmi = dmi->next;
1045 1044
1046 if(!(*addrs & 1)) 1045 if(!(*addrs & 1))
1047 continue; 1046 continue;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 65df1de447e4..381887ba677c 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1,6 +1,6 @@
1/* bnx2.c: Broadcom NX2 network driver. 1/* bnx2.c: Broadcom NX2 network driver.
2 * 2 *
3 * Copyright (c) 2004-2009 Broadcom Corporation 3 * Copyright (c) 2004-2010 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -9,6 +9,7 @@
9 * Written by: Michael Chan (mchan@broadcom.com) 9 * Written by: Michael Chan (mchan@broadcom.com)
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 13
13#include <linux/module.h> 14#include <linux/module.h>
14#include <linux/moduleparam.h> 15#include <linux/moduleparam.h>
@@ -48,7 +49,6 @@
48#include <linux/cache.h> 49#include <linux/cache.h>
49#include <linux/firmware.h> 50#include <linux/firmware.h>
50#include <linux/log2.h> 51#include <linux/log2.h>
51#include <linux/list.h>
52 52
53#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) 53#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54#define BCM_CNIC 1 54#define BCM_CNIC 1
@@ -58,14 +58,13 @@
58#include "bnx2_fw.h" 58#include "bnx2_fw.h"
59 59
60#define DRV_MODULE_NAME "bnx2" 60#define DRV_MODULE_NAME "bnx2"
61#define PFX DRV_MODULE_NAME ": " 61#define DRV_MODULE_VERSION "2.0.8"
62#define DRV_MODULE_VERSION "2.0.3" 62#define DRV_MODULE_RELDATE "Feb 15, 2010"
63#define DRV_MODULE_RELDATE "Dec 03, 2009" 63#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
64#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j3.fw"
65#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw" 64#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
66#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j3.fw" 65#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw"
67#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j3.fw" 66#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
68#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j3.fw" 67#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
69 68
70#define RUN_AT(x) (jiffies + (x)) 69#define RUN_AT(x) (jiffies + (x))
71 70
@@ -980,33 +979,27 @@ bnx2_report_link(struct bnx2 *bp)
980{ 979{
981 if (bp->link_up) { 980 if (bp->link_up) {
982 netif_carrier_on(bp->dev); 981 netif_carrier_on(bp->dev);
983 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name, 982 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
984 bnx2_xceiver_str(bp)); 983 bnx2_xceiver_str(bp),
985 984 bp->line_speed,
986 printk("%d Mbps ", bp->line_speed); 985 bp->duplex == DUPLEX_FULL ? "full" : "half");
987
988 if (bp->duplex == DUPLEX_FULL)
989 printk("full duplex");
990 else
991 printk("half duplex");
992 986
993 if (bp->flow_ctrl) { 987 if (bp->flow_ctrl) {
994 if (bp->flow_ctrl & FLOW_CTRL_RX) { 988 if (bp->flow_ctrl & FLOW_CTRL_RX) {
995 printk(", receive "); 989 pr_cont(", receive ");
996 if (bp->flow_ctrl & FLOW_CTRL_TX) 990 if (bp->flow_ctrl & FLOW_CTRL_TX)
997 printk("& transmit "); 991 pr_cont("& transmit ");
998 } 992 }
999 else { 993 else {
1000 printk(", transmit "); 994 pr_cont(", transmit ");
1001 } 995 }
1002 printk("flow control ON"); 996 pr_cont("flow control ON");
1003 } 997 }
1004 printk("\n"); 998 pr_cont("\n");
1005 } 999 } else {
1006 else {
1007 netif_carrier_off(bp->dev); 1000 netif_carrier_off(bp->dev);
1008 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name, 1001 netdev_err(bp->dev, "NIC %s Link is Down\n",
1009 bnx2_xceiver_str(bp)); 1002 bnx2_xceiver_str(bp));
1010 } 1003 }
1011 1004
1012 bnx2_report_fw_link(bp); 1005 bnx2_report_fw_link(bp);
@@ -1278,7 +1271,7 @@ bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1278 if (lo_water >= bp->rx_ring_size) 1271 if (lo_water >= bp->rx_ring_size)
1279 lo_water = 0; 1272 lo_water = 0;
1280 1273
1281 hi_water = bp->rx_ring_size / 4; 1274 hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);
1282 1275
1283 if (hi_water <= lo_water) 1276 if (hi_water <= lo_water)
1284 lo_water = 0; 1277 lo_water = 0;
@@ -2483,8 +2476,7 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2483 /* If we timed out, inform the firmware that this is the case. */ 2476 /* If we timed out, inform the firmware that this is the case. */
2484 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) { 2477 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2485 if (!silent) 2478 if (!silent)
2486 printk(KERN_ERR PFX "fw sync timeout, reset code = " 2479 pr_err("fw sync timeout, reset code = %x\n", msg_data);
2487 "%x\n", msg_data);
2488 2480
2489 msg_data &= ~BNX2_DRV_MSG_CODE; 2481 msg_data &= ~BNX2_DRV_MSG_CODE;
2490 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT; 2482 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
@@ -2600,8 +2592,7 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2600 2592
2601 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL); 2593 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2602 if (good_mbuf == NULL) { 2594 if (good_mbuf == NULL) {
2603 printk(KERN_ERR PFX "Failed to allocate memory in " 2595 pr_err("Failed to allocate memory in %s\n", __func__);
2604 "bnx2_alloc_bad_rbuf\n");
2605 return -ENOMEM; 2596 return -ENOMEM;
2606 } 2597 }
2607 2598
@@ -3561,9 +3552,7 @@ bnx2_set_rx_mode(struct net_device *dev)
3561 3552
3562 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS); 3553 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3563 3554
3564 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 3555 netdev_for_each_mc_addr(mclist, dev) {
3565 i++, mclist = mclist->next) {
3566
3567 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr); 3556 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3568 bit = crc & 0xff; 3557 bit = crc & 0xff;
3569 regidx = (bit & 0xe0) >> 5; 3558 regidx = (bit & 0xe0) >> 5;
@@ -3579,14 +3568,14 @@ bnx2_set_rx_mode(struct net_device *dev)
3579 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN; 3568 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3580 } 3569 }
3581 3570
3582 if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) { 3571 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3583 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS; 3572 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3584 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN | 3573 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3585 BNX2_RPM_SORT_USER0_PROM_VLAN; 3574 BNX2_RPM_SORT_USER0_PROM_VLAN;
3586 } else if (!(dev->flags & IFF_PROMISC)) { 3575 } else if (!(dev->flags & IFF_PROMISC)) {
3587 /* Add all entries into to the match filter list */ 3576 /* Add all entries into to the match filter list */
3588 i = 0; 3577 i = 0;
3589 list_for_each_entry(ha, &dev->uc.list, list) { 3578 netdev_for_each_uc_addr(ha, dev) {
3590 bnx2_set_mac_addr(bp, ha->addr, 3579 bnx2_set_mac_addr(bp, ha->addr,
3591 i + BNX2_START_UNICAST_ADDRESS_INDEX); 3580 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3592 sort_mode |= (1 << 3581 sort_mode |= (1 <<
@@ -3657,15 +3646,13 @@ bnx2_request_firmware(struct bnx2 *bp)
3657 3646
3658 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev); 3647 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3659 if (rc) { 3648 if (rc) {
3660 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n", 3649 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3661 mips_fw_file);
3662 return rc; 3650 return rc;
3663 } 3651 }
3664 3652
3665 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev); 3653 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3666 if (rc) { 3654 if (rc) {
3667 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n", 3655 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3668 rv2p_fw_file);
3669 return rc; 3656 return rc;
3670 } 3657 }
3671 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data; 3658 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
@@ -3676,15 +3663,13 @@ bnx2_request_firmware(struct bnx2 *bp)
3676 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) || 3663 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3677 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) || 3664 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3678 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) { 3665 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3679 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n", 3666 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3680 mips_fw_file);
3681 return -EINVAL; 3667 return -EINVAL;
3682 } 3668 }
3683 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) || 3669 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3684 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) || 3670 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3685 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) { 3671 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3686 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n", 3672 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3687 rv2p_fw_file);
3688 return -EINVAL; 3673 return -EINVAL;
3689 } 3674 }
3690 3675
@@ -4318,7 +4303,7 @@ bnx2_init_nvram(struct bnx2 *bp)
4318 4303
4319 if (j == entry_count) { 4304 if (j == entry_count) {
4320 bp->flash_info = NULL; 4305 bp->flash_info = NULL;
4321 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n"); 4306 pr_alert("Unknown flash/EEPROM type\n");
4322 return -ENODEV; 4307 return -ENODEV;
4323 } 4308 }
4324 4309
@@ -4738,7 +4723,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4738 4723
4739 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | 4724 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4740 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { 4725 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4741 printk(KERN_ERR PFX "Chip reset did not complete\n"); 4726 pr_err("Chip reset did not complete\n");
4742 return -EBUSY; 4727 return -EBUSY;
4743 } 4728 }
4744 } 4729 }
@@ -4746,7 +4731,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4746 /* Make sure byte swapping is properly configured. */ 4731 /* Make sure byte swapping is properly configured. */
4747 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0); 4732 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4748 if (val != 0x01020304) { 4733 if (val != 0x01020304) {
4749 printk(KERN_ERR PFX "Chip not in correct endian mode\n"); 4734 pr_err("Chip not in correct endian mode\n");
4750 return -ENODEV; 4735 return -ENODEV;
4751 } 4736 }
4752 4737
@@ -4941,7 +4926,7 @@ bnx2_init_chip(struct bnx2 *bp)
4941 BNX2_HC_CONFIG_COLLECT_STATS; 4926 BNX2_HC_CONFIG_COLLECT_STATS;
4942 } 4927 }
4943 4928
4944 if (bp->irq_nvecs > 1) { 4929 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4945 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR, 4930 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4946 BNX2_HC_MSIX_BIT_VECTOR_VAL); 4931 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4947 4932
@@ -5167,9 +5152,8 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5167 ring_prod = prod = rxr->rx_pg_prod; 5152 ring_prod = prod = rxr->rx_pg_prod;
5168 for (i = 0; i < bp->rx_pg_ring_size; i++) { 5153 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5169 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) { 5154 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
5170 printk(KERN_WARNING PFX "%s: init'ed rx page ring %d " 5155 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5171 "with %d/%d pages only\n", 5156 ring_num, i, bp->rx_pg_ring_size);
5172 bp->dev->name, ring_num, i, bp->rx_pg_ring_size);
5173 break; 5157 break;
5174 } 5158 }
5175 prod = NEXT_RX_BD(prod); 5159 prod = NEXT_RX_BD(prod);
@@ -5180,9 +5164,8 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5180 ring_prod = prod = rxr->rx_prod; 5164 ring_prod = prod = rxr->rx_prod;
5181 for (i = 0; i < bp->rx_ring_size; i++) { 5165 for (i = 0; i < bp->rx_ring_size; i++) {
5182 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) { 5166 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
5183 printk(KERN_WARNING PFX "%s: init'ed rx ring %d with " 5167 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5184 "%d/%d skbs only\n", 5168 ring_num, i, bp->rx_ring_size);
5185 bp->dev->name, ring_num, i, bp->rx_ring_size);
5186 break; 5169 break;
5187 } 5170 }
5188 prod = NEXT_RX_BD(prod); 5171 prod = NEXT_RX_BD(prod);
@@ -6145,6 +6128,10 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6145 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE); 6128 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6146 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE); 6129 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6147 6130
6131 /* Need to flush the previous three writes to ensure MSI-X
6132 * is setup properly */
6133 REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6134
6148 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { 6135 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6149 msix_ent[i].entry = i; 6136 msix_ent[i].entry = i;
6150 msix_ent[i].vector = 0; 6137 msix_ent[i].vector = 0;
@@ -6227,6 +6214,8 @@ bnx2_open(struct net_device *dev)
6227 6214
6228 atomic_set(&bp->intr_sem, 0); 6215 atomic_set(&bp->intr_sem, 0);
6229 6216
6217 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6218
6230 bnx2_enable_int(bp); 6219 bnx2_enable_int(bp);
6231 6220
6232 if (bp->flags & BNX2_FLAG_USING_MSI) { 6221 if (bp->flags & BNX2_FLAG_USING_MSI) {
@@ -6234,11 +6223,7 @@ bnx2_open(struct net_device *dev)
6234 * If MSI test fails, go back to INTx mode 6223 * If MSI test fails, go back to INTx mode
6235 */ 6224 */
6236 if (bnx2_test_intr(bp) != 0) { 6225 if (bnx2_test_intr(bp) != 0) {
6237 printk(KERN_WARNING PFX "%s: No interrupt was generated" 6226 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6238 " using MSI, switching to INTx mode. Please"
6239 " report this failure to the PCI maintainer"
6240 " and include system chipset information.\n",
6241 bp->dev->name);
6242 6227
6243 bnx2_disable_int(bp); 6228 bnx2_disable_int(bp);
6244 bnx2_free_irq(bp); 6229 bnx2_free_irq(bp);
@@ -6258,9 +6243,9 @@ bnx2_open(struct net_device *dev)
6258 } 6243 }
6259 } 6244 }
6260 if (bp->flags & BNX2_FLAG_USING_MSI) 6245 if (bp->flags & BNX2_FLAG_USING_MSI)
6261 printk(KERN_INFO PFX "%s: using MSI\n", dev->name); 6246 netdev_info(dev, "using MSI\n");
6262 else if (bp->flags & BNX2_FLAG_USING_MSIX) 6247 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6263 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name); 6248 netdev_info(dev, "using MSIX\n");
6264 6249
6265 netif_tx_start_all_queues(dev); 6250 netif_tx_start_all_queues(dev);
6266 6251
@@ -6299,20 +6284,18 @@ bnx2_dump_state(struct bnx2 *bp)
6299{ 6284{
6300 struct net_device *dev = bp->dev; 6285 struct net_device *dev = bp->dev;
6301 6286
6302 printk(KERN_ERR PFX "%s DEBUG: intr_sem[%x]\n", dev->name, 6287 netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
6303 atomic_read(&bp->intr_sem)); 6288 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n",
6304 printk(KERN_ERR PFX "%s DEBUG: EMAC_TX_STATUS[%08x] " 6289 REG_RD(bp, BNX2_EMAC_TX_STATUS),
6305 "RPM_MGMT_PKT_CTRL[%08x]\n", dev->name, 6290 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6306 REG_RD(bp, BNX2_EMAC_TX_STATUS), 6291 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
6307 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL)); 6292 bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
6308 printk(KERN_ERR PFX "%s DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n", 6293 bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
6309 dev->name, bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0), 6294 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6310 bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1)); 6295 REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6311 printk(KERN_ERR PFX "%s DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6312 dev->name, REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6313 if (bp->flags & BNX2_FLAG_USING_MSIX) 6296 if (bp->flags & BNX2_FLAG_USING_MSIX)
6314 printk(KERN_ERR PFX "%s DEBUG: PBA[%08x]\n", dev->name, 6297 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6315 REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE)); 6298 REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6316} 6299}
6317 6300
6318static void 6301static void
@@ -6376,8 +6359,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6376 if (unlikely(bnx2_tx_avail(bp, txr) < 6359 if (unlikely(bnx2_tx_avail(bp, txr) <
6377 (skb_shinfo(skb)->nr_frags + 1))) { 6360 (skb_shinfo(skb)->nr_frags + 1))) {
6378 netif_tx_stop_queue(txq); 6361 netif_tx_stop_queue(txq);
6379 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n", 6362 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6380 dev->name);
6381 6363
6382 return NETDEV_TX_BUSY; 6364 return NETDEV_TX_BUSY;
6383 } 6365 }
@@ -6538,92 +6520,121 @@ bnx2_close(struct net_device *dev)
6538 return 0; 6520 return 0;
6539} 6521}
6540 6522
6541#define GET_NET_STATS64(ctr) \ 6523static void
6524bnx2_save_stats(struct bnx2 *bp)
6525{
6526 u32 *hw_stats = (u32 *) bp->stats_blk;
6527 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6528 int i;
6529
6530 /* The 1st 10 counters are 64-bit counters */
6531 for (i = 0; i < 20; i += 2) {
6532 u32 hi;
6533 u64 lo;
6534
6535 hi = temp_stats[i] + hw_stats[i];
6536 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6537 if (lo > 0xffffffff)
6538 hi++;
6539 temp_stats[i] = hi;
6540 temp_stats[i + 1] = lo & 0xffffffff;
6541 }
6542
6543 for ( ; i < sizeof(struct statistics_block) / 4; i++)
6544 temp_stats[i] += hw_stats[i];
6545}
6546
6547#define GET_64BIT_NET_STATS64(ctr) \
6542 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \ 6548 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6543 (unsigned long) (ctr##_lo) 6549 (unsigned long) (ctr##_lo)
6544 6550
6545#define GET_NET_STATS32(ctr) \ 6551#define GET_64BIT_NET_STATS32(ctr) \
6546 (ctr##_lo) 6552 (ctr##_lo)
6547 6553
6548#if (BITS_PER_LONG == 64) 6554#if (BITS_PER_LONG == 64)
6549#define GET_NET_STATS GET_NET_STATS64 6555#define GET_64BIT_NET_STATS(ctr) \
6556 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6557 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6550#else 6558#else
6551#define GET_NET_STATS GET_NET_STATS32 6559#define GET_64BIT_NET_STATS(ctr) \
6560 GET_64BIT_NET_STATS32(bp->stats_blk->ctr) + \
6561 GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
6552#endif 6562#endif
6553 6563
6564#define GET_32BIT_NET_STATS(ctr) \
6565 (unsigned long) (bp->stats_blk->ctr + \
6566 bp->temp_stats_blk->ctr)
6567
6554static struct net_device_stats * 6568static struct net_device_stats *
6555bnx2_get_stats(struct net_device *dev) 6569bnx2_get_stats(struct net_device *dev)
6556{ 6570{
6557 struct bnx2 *bp = netdev_priv(dev); 6571 struct bnx2 *bp = netdev_priv(dev);
6558 struct statistics_block *stats_blk = bp->stats_blk;
6559 struct net_device_stats *net_stats = &dev->stats; 6572 struct net_device_stats *net_stats = &dev->stats;
6560 6573
6561 if (bp->stats_blk == NULL) { 6574 if (bp->stats_blk == NULL) {
6562 return net_stats; 6575 return net_stats;
6563 } 6576 }
6564 net_stats->rx_packets = 6577 net_stats->rx_packets =
6565 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) + 6578 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6566 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) + 6579 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6567 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts); 6580 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6568 6581
6569 net_stats->tx_packets = 6582 net_stats->tx_packets =
6570 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) + 6583 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6571 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) + 6584 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6572 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts); 6585 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6573 6586
6574 net_stats->rx_bytes = 6587 net_stats->rx_bytes =
6575 GET_NET_STATS(stats_blk->stat_IfHCInOctets); 6588 GET_64BIT_NET_STATS(stat_IfHCInOctets);
6576 6589
6577 net_stats->tx_bytes = 6590 net_stats->tx_bytes =
6578 GET_NET_STATS(stats_blk->stat_IfHCOutOctets); 6591 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6579 6592
6580 net_stats->multicast = 6593 net_stats->multicast =
6581 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts); 6594 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);
6582 6595
6583 net_stats->collisions = 6596 net_stats->collisions =
6584 (unsigned long) stats_blk->stat_EtherStatsCollisions; 6597 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6585 6598
6586 net_stats->rx_length_errors = 6599 net_stats->rx_length_errors =
6587 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts + 6600 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6588 stats_blk->stat_EtherStatsOverrsizePkts); 6601 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6589 6602
6590 net_stats->rx_over_errors = 6603 net_stats->rx_over_errors =
6591 (unsigned long) (stats_blk->stat_IfInFTQDiscards + 6604 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6592 stats_blk->stat_IfInMBUFDiscards); 6605 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6593 6606
6594 net_stats->rx_frame_errors = 6607 net_stats->rx_frame_errors =
6595 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors; 6608 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6596 6609
6597 net_stats->rx_crc_errors = 6610 net_stats->rx_crc_errors =
6598 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors; 6611 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6599 6612
6600 net_stats->rx_errors = net_stats->rx_length_errors + 6613 net_stats->rx_errors = net_stats->rx_length_errors +
6601 net_stats->rx_over_errors + net_stats->rx_frame_errors + 6614 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6602 net_stats->rx_crc_errors; 6615 net_stats->rx_crc_errors;
6603 6616
6604 net_stats->tx_aborted_errors = 6617 net_stats->tx_aborted_errors =
6605 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions + 6618 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6606 stats_blk->stat_Dot3StatsLateCollisions); 6619 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6607 6620
6608 if ((CHIP_NUM(bp) == CHIP_NUM_5706) || 6621 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6609 (CHIP_ID(bp) == CHIP_ID_5708_A0)) 6622 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6610 net_stats->tx_carrier_errors = 0; 6623 net_stats->tx_carrier_errors = 0;
6611 else { 6624 else {
6612 net_stats->tx_carrier_errors = 6625 net_stats->tx_carrier_errors =
6613 (unsigned long) 6626 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6614 stats_blk->stat_Dot3StatsCarrierSenseErrors;
6615 } 6627 }
6616 6628
6617 net_stats->tx_errors = 6629 net_stats->tx_errors =
6618 (unsigned long) 6630 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6619 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
6620 +
6621 net_stats->tx_aborted_errors + 6631 net_stats->tx_aborted_errors +
6622 net_stats->tx_carrier_errors; 6632 net_stats->tx_carrier_errors;
6623 6633
6624 net_stats->rx_missed_errors = 6634 net_stats->rx_missed_errors =
6625 (unsigned long) (stats_blk->stat_IfInFTQDiscards + 6635 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6626 stats_blk->stat_IfInMBUFDiscards + stats_blk->stat_FwRxDrop); 6636 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6637 GET_32BIT_NET_STATS(stat_FwRxDrop);
6627 6638
6628 return net_stats; 6639 return net_stats;
6629} 6640}
@@ -6717,32 +6728,15 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6717 if (cmd->autoneg == AUTONEG_ENABLE) { 6728 if (cmd->autoneg == AUTONEG_ENABLE) {
6718 autoneg |= AUTONEG_SPEED; 6729 autoneg |= AUTONEG_SPEED;
6719 6730
6720 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED; 6731 advertising = cmd->advertising;
6721 6732 if (cmd->port == PORT_TP) {
6722 /* allow advertising 1 speed */ 6733 advertising &= ETHTOOL_ALL_COPPER_SPEED;
6723 if ((cmd->advertising == ADVERTISED_10baseT_Half) || 6734 if (!advertising)
6724 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6725 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6726 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6727
6728 if (cmd->port == PORT_FIBRE)
6729 goto err_out_unlock;
6730
6731 advertising = cmd->advertising;
6732
6733 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6734 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
6735 (cmd->port == PORT_TP))
6736 goto err_out_unlock;
6737 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
6738 advertising = cmd->advertising;
6739 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
6740 goto err_out_unlock;
6741 else {
6742 if (cmd->port == PORT_FIBRE)
6743 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6744 else
6745 advertising = ETHTOOL_ALL_COPPER_SPEED; 6735 advertising = ETHTOOL_ALL_COPPER_SPEED;
6736 } else {
6737 advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6738 if (!advertising)
6739 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6746 } 6740 }
6747 advertising |= ADVERTISED_Autoneg; 6741 advertising |= ADVERTISED_Autoneg;
6748 } 6742 }
@@ -7083,6 +7077,9 @@ static int
7083bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx) 7077bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7084{ 7078{
7085 if (netif_running(bp->dev)) { 7079 if (netif_running(bp->dev)) {
7080 /* Reset will erase chipset stats; save them */
7081 bnx2_save_stats(bp);
7082
7086 bnx2_netif_stop(bp); 7083 bnx2_netif_stop(bp);
7087 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); 7084 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7088 bnx2_free_skbs(bp); 7085 bnx2_free_skbs(bp);
@@ -7104,6 +7101,13 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7104 dev_close(bp->dev); 7101 dev_close(bp->dev);
7105 return rc; 7102 return rc;
7106 } 7103 }
7104#ifdef BCM_CNIC
7105 mutex_lock(&bp->cnic_lock);
7106 /* Let cnic know about the new status block. */
7107 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7108 bnx2_setup_cnic_irq_info(bp);
7109 mutex_unlock(&bp->cnic_lock);
7110#endif
7107 bnx2_netif_start(bp); 7111 bnx2_netif_start(bp);
7108 } 7112 }
7109 return 0; 7113 return 0;
@@ -7427,6 +7431,7 @@ bnx2_get_ethtool_stats(struct net_device *dev,
7427 struct bnx2 *bp = netdev_priv(dev); 7431 struct bnx2 *bp = netdev_priv(dev);
7428 int i; 7432 int i;
7429 u32 *hw_stats = (u32 *) bp->stats_blk; 7433 u32 *hw_stats = (u32 *) bp->stats_blk;
7434 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7430 u8 *stats_len_arr = NULL; 7435 u8 *stats_len_arr = NULL;
7431 7436
7432 if (hw_stats == NULL) { 7437 if (hw_stats == NULL) {
@@ -7443,21 +7448,26 @@ bnx2_get_ethtool_stats(struct net_device *dev,
7443 stats_len_arr = bnx2_5708_stats_len_arr; 7448 stats_len_arr = bnx2_5708_stats_len_arr;
7444 7449
7445 for (i = 0; i < BNX2_NUM_STATS; i++) { 7450 for (i = 0; i < BNX2_NUM_STATS; i++) {
7451 unsigned long offset;
7452
7446 if (stats_len_arr[i] == 0) { 7453 if (stats_len_arr[i] == 0) {
7447 /* skip this counter */ 7454 /* skip this counter */
7448 buf[i] = 0; 7455 buf[i] = 0;
7449 continue; 7456 continue;
7450 } 7457 }
7458
7459 offset = bnx2_stats_offset_arr[i];
7451 if (stats_len_arr[i] == 4) { 7460 if (stats_len_arr[i] == 4) {
7452 /* 4-byte counter */ 7461 /* 4-byte counter */
7453 buf[i] = (u64) 7462 buf[i] = (u64) *(hw_stats + offset) +
7454 *(hw_stats + bnx2_stats_offset_arr[i]); 7463 *(temp_stats + offset);
7455 continue; 7464 continue;
7456 } 7465 }
7457 /* 8-byte counter */ 7466 /* 8-byte counter */
7458 buf[i] = (((u64) *(hw_stats + 7467 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7459 bnx2_stats_offset_arr[i])) << 32) + 7468 *(hw_stats + offset + 1) +
7460 *(hw_stats + bnx2_stats_offset_arr[i] + 1); 7469 (((u64) *(temp_stats + offset)) << 32) +
7470 *(temp_stats + offset + 1);
7461 } 7471 }
7462} 7472}
7463 7473
@@ -7625,7 +7635,7 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu)
7625 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size)); 7635 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7626} 7636}
7627 7637
7628#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) 7638#ifdef CONFIG_NET_POLL_CONTROLLER
7629static void 7639static void
7630poll_bnx2(struct net_device *dev) 7640poll_bnx2(struct net_device *dev)
7631{ 7641{
@@ -7733,10 +7743,9 @@ bnx2_get_pci_speed(struct bnx2 *bp)
7733static void __devinit 7743static void __devinit
7734bnx2_read_vpd_fw_ver(struct bnx2 *bp) 7744bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7735{ 7745{
7736 int rc, i, v0_len = 0; 7746 int rc, i, j;
7737 u8 *data; 7747 u8 *data;
7738 u8 *v0_str = NULL; 7748 unsigned int block_end, rosize, len;
7739 bool mn_match = false;
7740 7749
7741#define BNX2_VPD_NVRAM_OFFSET 0x300 7750#define BNX2_VPD_NVRAM_OFFSET 0x300
7742#define BNX2_VPD_LEN 128 7751#define BNX2_VPD_LEN 128
@@ -7758,53 +7767,42 @@ bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7758 data[i + 3] = data[i + BNX2_VPD_LEN]; 7767 data[i + 3] = data[i + BNX2_VPD_LEN];
7759 } 7768 }
7760 7769
7761 for (i = 0; i <= BNX2_VPD_LEN - 3; ) { 7770 i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
7762 unsigned char val = data[i]; 7771 if (i < 0)
7763 unsigned int block_end; 7772 goto vpd_done;
7764
7765 if (val == 0x82 || val == 0x91) {
7766 i = (i + 3 + (data[i + 1] + (data[i + 2] << 8)));
7767 continue;
7768 }
7769
7770 if (val != 0x90)
7771 goto vpd_done;
7772 7773
7773 block_end = (i + 3 + (data[i + 1] + (data[i + 2] << 8))); 7774 rosize = pci_vpd_lrdt_size(&data[i]);
7774 i += 3; 7775 i += PCI_VPD_LRDT_TAG_SIZE;
7776 block_end = i + rosize;
7775 7777
7776 if (block_end > BNX2_VPD_LEN) 7778 if (block_end > BNX2_VPD_LEN)
7777 goto vpd_done; 7779 goto vpd_done;
7778 7780
7779 while (i < (block_end - 2)) { 7781 j = pci_vpd_find_info_keyword(data, i, rosize,
7780 int len = data[i + 2]; 7782 PCI_VPD_RO_KEYWORD_MFR_ID);
7783 if (j < 0)
7784 goto vpd_done;
7781 7785
7782 if (i + 3 + len > block_end) 7786 len = pci_vpd_info_field_size(&data[j]);
7783 goto vpd_done;
7784 7787
7785 if (data[i] == 'M' && data[i + 1] == 'N') { 7788 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7786 if (len != 4 || 7789 if (j + len > block_end || len != 4 ||
7787 memcmp(&data[i + 3], "1028", 4)) 7790 memcmp(&data[j], "1028", 4))
7788 goto vpd_done; 7791 goto vpd_done;
7789 mn_match = true;
7790 7792
7791 } else if (data[i] == 'V' && data[i + 1] == '0') { 7793 j = pci_vpd_find_info_keyword(data, i, rosize,
7792 if (len > BNX2_MAX_VER_SLEN) 7794 PCI_VPD_RO_KEYWORD_VENDOR0);
7793 goto vpd_done; 7795 if (j < 0)
7796 goto vpd_done;
7794 7797
7795 v0_len = len; 7798 len = pci_vpd_info_field_size(&data[j]);
7796 v0_str = &data[i + 3];
7797 }
7798 i += 3 + len;
7799 7799
7800 if (mn_match && v0_str) { 7800 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7801 memcpy(bp->fw_version, v0_str, v0_len); 7801 if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
7802 bp->fw_version[v0_len] = ' ';
7803 goto vpd_done;
7804 }
7805 }
7806 goto vpd_done; 7802 goto vpd_done;
7807 } 7803
7804 memcpy(bp->fw_version, &data[j], len);
7805 bp->fw_version[len] = ' ';
7808 7806
7809vpd_done: 7807vpd_done:
7810 kfree(data); 7808 kfree(data);
@@ -7825,23 +7823,31 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7825 bp->flags = 0; 7823 bp->flags = 0;
7826 bp->phy_flags = 0; 7824 bp->phy_flags = 0;
7827 7825
7826 bp->temp_stats_blk =
7827 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7828
7829 if (bp->temp_stats_blk == NULL) {
7830 rc = -ENOMEM;
7831 goto err_out;
7832 }
7833
7828 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 7834 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7829 rc = pci_enable_device(pdev); 7835 rc = pci_enable_device(pdev);
7830 if (rc) { 7836 if (rc) {
7831 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n"); 7837 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7832 goto err_out; 7838 goto err_out;
7833 } 7839 }
7834 7840
7835 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 7841 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7836 dev_err(&pdev->dev, 7842 dev_err(&pdev->dev,
7837 "Cannot find PCI device base address, aborting.\n"); 7843 "Cannot find PCI device base address, aborting\n");
7838 rc = -ENODEV; 7844 rc = -ENODEV;
7839 goto err_out_disable; 7845 goto err_out_disable;
7840 } 7846 }
7841 7847
7842 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 7848 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7843 if (rc) { 7849 if (rc) {
7844 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n"); 7850 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7845 goto err_out_disable; 7851 goto err_out_disable;
7846 } 7852 }
7847 7853
@@ -7851,7 +7857,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7851 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 7857 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7852 if (bp->pm_cap == 0) { 7858 if (bp->pm_cap == 0) {
7853 dev_err(&pdev->dev, 7859 dev_err(&pdev->dev,
7854 "Cannot find power management capability, aborting.\n"); 7860 "Cannot find power management capability, aborting\n");
7855 rc = -EIO; 7861 rc = -EIO;
7856 goto err_out_release; 7862 goto err_out_release;
7857 } 7863 }
@@ -7874,7 +7880,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7874 bp->regview = ioremap_nocache(dev->base_addr, mem_len); 7880 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7875 7881
7876 if (!bp->regview) { 7882 if (!bp->regview) {
7877 dev_err(&pdev->dev, "Cannot map register space, aborting.\n"); 7883 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7878 rc = -ENOMEM; 7884 rc = -ENOMEM;
7879 goto err_out_release; 7885 goto err_out_release;
7880 } 7886 }
@@ -7894,7 +7900,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7894 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 7900 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7895 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) { 7901 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7896 dev_err(&pdev->dev, 7902 dev_err(&pdev->dev,
7897 "Cannot find PCIE capability, aborting.\n"); 7903 "Cannot find PCIE capability, aborting\n");
7898 rc = -EIO; 7904 rc = -EIO;
7899 goto err_out_unmap; 7905 goto err_out_unmap;
7900 } 7906 }
@@ -7905,7 +7911,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7905 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX); 7911 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7906 if (bp->pcix_cap == 0) { 7912 if (bp->pcix_cap == 0) {
7907 dev_err(&pdev->dev, 7913 dev_err(&pdev->dev,
7908 "Cannot find PCIX capability, aborting.\n"); 7914 "Cannot find PCIX capability, aborting\n");
7909 rc = -EIO; 7915 rc = -EIO;
7910 goto err_out_unmap; 7916 goto err_out_unmap;
7911 } 7917 }
@@ -7934,11 +7940,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7934 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask); 7940 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7935 if (rc) { 7941 if (rc) {
7936 dev_err(&pdev->dev, 7942 dev_err(&pdev->dev,
7937 "pci_set_consistent_dma_mask failed, aborting.\n"); 7943 "pci_set_consistent_dma_mask failed, aborting\n");
7938 goto err_out_unmap; 7944 goto err_out_unmap;
7939 } 7945 }
7940 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { 7946 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7941 dev_err(&pdev->dev, "System does not support DMA, aborting.\n"); 7947 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
7942 goto err_out_unmap; 7948 goto err_out_unmap;
7943 } 7949 }
7944 7950
@@ -7955,7 +7961,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7955 !(bp->flags & BNX2_FLAG_PCIX)) { 7961 !(bp->flags & BNX2_FLAG_PCIX)) {
7956 7962
7957 dev_err(&pdev->dev, 7963 dev_err(&pdev->dev,
7958 "5706 A1 can only be used in a PCIX bus, aborting.\n"); 7964 "5706 A1 can only be used in a PCIX bus, aborting\n");
7959 goto err_out_unmap; 7965 goto err_out_unmap;
7960 } 7966 }
7961 7967
@@ -7978,7 +7984,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7978 7984
7979 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) != 7985 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7980 BNX2_DEV_INFO_SIGNATURE_MAGIC) { 7986 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7981 dev_err(&pdev->dev, "Firmware not running, aborting.\n"); 7987 dev_err(&pdev->dev, "Firmware not running, aborting\n");
7982 rc = -ENODEV; 7988 rc = -ENODEV;
7983 goto err_out_unmap; 7989 goto err_out_unmap;
7984 } 7990 }
@@ -8229,7 +8235,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
8229#ifdef BCM_VLAN 8235#ifdef BCM_VLAN
8230 .ndo_vlan_rx_register = bnx2_vlan_rx_register, 8236 .ndo_vlan_rx_register = bnx2_vlan_rx_register,
8231#endif 8237#endif
8232#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) 8238#ifdef CONFIG_NET_POLL_CONTROLLER
8233 .ndo_poll_controller = poll_bnx2, 8239 .ndo_poll_controller = poll_bnx2,
8234#endif 8240#endif
8235}; 8241};
@@ -8251,7 +8257,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8251 char str[40]; 8257 char str[40];
8252 8258
8253 if (version_printed++ == 0) 8259 if (version_printed++ == 0)
8254 printk(KERN_INFO "%s", version); 8260 pr_info("%s", version);
8255 8261
8256 /* dev zeroed in init_etherdev */ 8262 /* dev zeroed in init_etherdev */
8257 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS); 8263 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
@@ -8301,15 +8307,13 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8301 goto error; 8307 goto error;
8302 } 8308 }
8303 8309
8304 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, " 8310 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
8305 "IRQ %d, node addr %pM\n", 8311 board_info[ent->driver_data].name,
8306 dev->name, 8312 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8307 board_info[ent->driver_data].name, 8313 ((CHIP_ID(bp) & 0x0ff0) >> 4),
8308 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', 8314 bnx2_bus_string(bp, str),
8309 ((CHIP_ID(bp) & 0x0ff0) >> 4), 8315 dev->base_addr,
8310 bnx2_bus_string(bp, str), 8316 bp->pdev->irq, dev->dev_addr);
8311 dev->base_addr,
8312 bp->pdev->irq, dev->dev_addr);
8313 8317
8314 return 0; 8318 return 0;
8315 8319
@@ -8346,6 +8350,8 @@ bnx2_remove_one(struct pci_dev *pdev)
8346 if (bp->regview) 8350 if (bp->regview)
8347 iounmap(bp->regview); 8351 iounmap(bp->regview);
8348 8352
8353 kfree(bp->temp_stats_blk);
8354
8349 free_netdev(dev); 8355 free_netdev(dev);
8350 pci_release_regions(pdev); 8356 pci_release_regions(pdev);
8351 pci_disable_device(pdev); 8357 pci_disable_device(pdev);
@@ -8442,7 +8448,7 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8442 rtnl_lock(); 8448 rtnl_lock();
8443 if (pci_enable_device(pdev)) { 8449 if (pci_enable_device(pdev)) {
8444 dev_err(&pdev->dev, 8450 dev_err(&pdev->dev,
8445 "Cannot re-enable PCI device after reset.\n"); 8451 "Cannot re-enable PCI device after reset\n");
8446 rtnl_unlock(); 8452 rtnl_unlock();
8447 return PCI_ERS_RESULT_DISCONNECT; 8453 return PCI_ERS_RESULT_DISCONNECT;
8448 } 8454 }
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 939dc44d50a0..cd4b0e4637ab 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -349,7 +349,7 @@ struct l2_fhdr {
349#define BNX2_L2CTX_BD_PRE_READ 0x00000000 349#define BNX2_L2CTX_BD_PRE_READ 0x00000000
350#define BNX2_L2CTX_CTX_SIZE 0x00000000 350#define BNX2_L2CTX_CTX_SIZE 0x00000000
351#define BNX2_L2CTX_CTX_TYPE 0x00000000 351#define BNX2_L2CTX_CTX_TYPE 0x00000000
352#define BNX2_L2CTX_LO_WATER_MARK_DEFAULT 32 352#define BNX2_L2CTX_LO_WATER_MARK_DEFAULT 4
353#define BNX2_L2CTX_LO_WATER_MARK_SCALE 4 353#define BNX2_L2CTX_LO_WATER_MARK_SCALE 4
354#define BNX2_L2CTX_LO_WATER_MARK_DIS 0 354#define BNX2_L2CTX_LO_WATER_MARK_DIS 0
355#define BNX2_L2CTX_HI_WATER_MARK_SHIFT 4 355#define BNX2_L2CTX_HI_WATER_MARK_SHIFT 4
@@ -6851,6 +6851,7 @@ struct bnx2 {
6851 dma_addr_t status_blk_mapping; 6851 dma_addr_t status_blk_mapping;
6852 6852
6853 struct statistics_block *stats_blk; 6853 struct statistics_block *stats_blk;
6854 struct statistics_block *temp_stats_blk;
6854 dma_addr_t stats_blk_mapping; 6855 dma_addr_t stats_blk_mapping;
6855 6856
6856 int ctx_pages; 6857 int ctx_pages;
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 602ab86b6392..3c48a7a68308 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -1,6 +1,6 @@
1/* bnx2x.h: Broadcom Everest network driver. 1/* bnx2x.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2009 Broadcom Corporation 3 * Copyright (c) 2007-2010 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -44,7 +44,6 @@
44/* error/debug prints */ 44/* error/debug prints */
45 45
46#define DRV_MODULE_NAME "bnx2x" 46#define DRV_MODULE_NAME "bnx2x"
47#define PFX DRV_MODULE_NAME ": "
48 47
49/* for messages that are currently off */ 48/* for messages that are currently off */
50#define BNX2X_MSG_OFF 0 49#define BNX2X_MSG_OFF 0
@@ -58,30 +57,40 @@
58#define DP_LEVEL KERN_NOTICE /* was: KERN_DEBUG */ 57#define DP_LEVEL KERN_NOTICE /* was: KERN_DEBUG */
59 58
60/* regular debug print */ 59/* regular debug print */
61#define DP(__mask, __fmt, __args...) do { \ 60#define DP(__mask, __fmt, __args...) \
62 if (bp->msglevel & (__mask)) \ 61do { \
63 printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ 62 if (bp->msg_enable & (__mask)) \
64 bp->dev ? (bp->dev->name) : "?", ##__args); \ 63 printk(DP_LEVEL "[%s:%d(%s)]" __fmt, \
65 } while (0) 64 __func__, __LINE__, \
65 bp->dev ? (bp->dev->name) : "?", \
66 ##__args); \
67} while (0)
66 68
67/* errors debug print */ 69/* errors debug print */
68#define BNX2X_DBG_ERR(__fmt, __args...) do { \ 70#define BNX2X_DBG_ERR(__fmt, __args...) \
69 if (bp->msglevel & NETIF_MSG_PROBE) \ 71do { \
70 printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ 72 if (netif_msg_probe(bp)) \
71 bp->dev ? (bp->dev->name) : "?", ##__args); \ 73 pr_err("[%s:%d(%s)]" __fmt, \
72 } while (0) 74 __func__, __LINE__, \
75 bp->dev ? (bp->dev->name) : "?", \
76 ##__args); \
77} while (0)
73 78
74/* for errors (never masked) */ 79/* for errors (never masked) */
75#define BNX2X_ERR(__fmt, __args...) do { \ 80#define BNX2X_ERR(__fmt, __args...) \
76 printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ 81do { \
77 bp->dev ? (bp->dev->name) : "?", ##__args); \ 82 pr_err("[%s:%d(%s)]" __fmt, \
78 } while (0) 83 __func__, __LINE__, \
84 bp->dev ? (bp->dev->name) : "?", \
85 ##__args); \
86} while (0)
79 87
80/* before we have a dev->name use dev_info() */ 88/* before we have a dev->name use dev_info() */
81#define BNX2X_DEV_INFO(__fmt, __args...) do { \ 89#define BNX2X_DEV_INFO(__fmt, __args...) \
82 if (bp->msglevel & NETIF_MSG_PROBE) \ 90do { \
83 dev_info(&bp->pdev->dev, __fmt, ##__args); \ 91 if (netif_msg_probe(bp)) \
84 } while (0) 92 dev_info(&bp->pdev->dev, __fmt, ##__args); \
93} while (0)
85 94
86 95
87#ifdef BNX2X_STOP_ON_ERROR 96#ifdef BNX2X_STOP_ON_ERROR
@@ -130,7 +139,7 @@
130 offset, len32); \ 139 offset, len32); \
131 } while (0) 140 } while (0)
132 141
133#define VIRT_WR_DMAE_LEN(bp, data, addr, len32) \ 142#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
134 do { \ 143 do { \
135 memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \ 144 memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
136 bnx2x_write_big_buf_wb(bp, addr, len32); \ 145 bnx2x_write_big_buf_wb(bp, addr, len32); \
@@ -882,7 +891,7 @@ struct bnx2x {
882 /* End of fields used in the performance code paths */ 891 /* End of fields used in the performance code paths */
883 892
884 int panic; 893 int panic;
885 int msglevel; 894 int msg_enable;
886 895
887 u32 flags; 896 u32 flags;
888#define PCIX_FLAG 1 897#define PCIX_FLAG 1
diff --git a/drivers/net/bnx2x_fw_defs.h b/drivers/net/bnx2x_fw_defs.h
index 931dcace5628..08d71bf438d6 100644
--- a/drivers/net/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x_fw_defs.h
@@ -1,6 +1,6 @@
1/* bnx2x_fw_defs.h: Broadcom Everest network driver. 1/* bnx2x_fw_defs.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2009 Broadcom Corporation 3 * Copyright (c) 2007-2010 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -471,6 +471,11 @@
471 471
472 472
473/* Host coalescing constants */ 473/* Host coalescing constants */
474#define HC_IGU_BC_MODE 0
475#define HC_IGU_NBC_MODE 1
476
477#define HC_REGULAR_SEGMENT 0
478#define HC_DEFAULT_SEGMENT 1
474 479
475/* index numbers */ 480/* index numbers */
476#define HC_USTORM_DEF_SB_NUM_INDICES 8 481#define HC_USTORM_DEF_SB_NUM_INDICES 8
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
index 52585338ada8..760069345b11 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x_hsi.h
@@ -1,6 +1,6 @@
1/* bnx2x_hsi.h: Broadcom Everest network driver. 1/* bnx2x_hsi.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2009 Broadcom Corporation 3 * Copyright (c) 2007-2010 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -1261,7 +1261,7 @@ struct host_func_stats {
1261 1261
1262#define BCM_5710_FW_MAJOR_VERSION 5 1262#define BCM_5710_FW_MAJOR_VERSION 5
1263#define BCM_5710_FW_MINOR_VERSION 2 1263#define BCM_5710_FW_MINOR_VERSION 2
1264#define BCM_5710_FW_REVISION_VERSION 7 1264#define BCM_5710_FW_REVISION_VERSION 13
1265#define BCM_5710_FW_ENGINEERING_VERSION 0 1265#define BCM_5710_FW_ENGINEERING_VERSION 0
1266#define BCM_5710_FW_COMPILE_FLAGS 1 1266#define BCM_5710_FW_COMPILE_FLAGS 1
1267 1267
@@ -2433,8 +2433,10 @@ struct common_ramrod_eth_rx_cqe {
2433 u8 ramrod_type; 2433 u8 ramrod_type;
2434#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x1<<0) 2434#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x1<<0)
2435#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0 2435#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0
2436#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x7F<<1) 2436#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<1)
2437#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 1 2437#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 1
2438#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x3F<<2)
2439#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 2
2438 u8 conn_type; 2440 u8 conn_type;
2439 __le16 reserved1; 2441 __le16 reserved1;
2440 __le32 conn_and_cmd_data; 2442 __le32 conn_and_cmd_data;
diff --git a/drivers/net/bnx2x_init_ops.h b/drivers/net/bnx2x_init_ops.h
index 38b970a14fd7..2b1363a6fe78 100644
--- a/drivers/net/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x_init_ops.h
@@ -2,7 +2,7 @@
2 * Static functions needed during the initialization. 2 * Static functions needed during the initialization.
3 * This file is "included" in bnx2x_main.c. 3 * This file is "included" in bnx2x_main.c.
4 * 4 *
5 * Copyright (c) 2007-2009 Broadcom Corporation 5 * Copyright (c) 2007-2010 Broadcom Corporation
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -138,11 +138,16 @@ static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
138static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data, 138static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
139 u32 len) 139 u32 len)
140{ 140{
141 const u32 *old_data = data;
142
141 data = (const u32 *)bnx2x_sel_blob(bp, addr, (const u8 *)data); 143 data = (const u32 *)bnx2x_sel_blob(bp, addr, (const u8 *)data);
142 144
143 if (bp->dmae_ready) 145 if (bp->dmae_ready) {
144 VIRT_WR_DMAE_LEN(bp, data, addr, len); 146 if (old_data != data)
145 else 147 VIRT_WR_DMAE_LEN(bp, data, addr, len, 1);
148 else
149 VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
150 } else
146 bnx2x_init_ind_wr(bp, addr, data, len); 151 bnx2x_init_ind_wr(bp, addr, data, len);
147} 152}
148 153
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index cf5778919b4b..32e79c359e89 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -14,6 +14,8 @@
14 * 14 *
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <linux/kernel.h> 19#include <linux/kernel.h>
18#include <linux/errno.h> 20#include <linux/errno.h>
19#include <linux/pci.h> 21#include <linux/pci.h>
@@ -2987,11 +2989,8 @@ static u8 bnx2x_verify_sfp_module(struct link_params *params)
2987 else 2989 else
2988 vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0'; 2990 vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
2989 2991
2990 printk(KERN_INFO PFX "Warning: " 2992 netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected, Port %d from %s part number %s\n",
2991 "Unqualified SFP+ module " 2993 params->port, vendor_name, vendor_pn);
2992 "detected on %s, Port %d from %s part number %s\n"
2993 , bp->dev->name, params->port,
2994 vendor_name, vendor_pn);
2995 return -EINVAL; 2994 return -EINVAL;
2996} 2995}
2997 2996
@@ -4846,16 +4845,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
4846 " has been detected on " 4845 " has been detected on "
4847 "port %d\n", 4846 "port %d\n",
4848 params->port); 4847 params->port);
4849 printk(KERN_ERR PFX "Error: Power" 4848 netdev_err(bp->dev, "Error: Power fault on Port %d has been detected and the power to that SFP+ module has been removed to prevent failure of the card. Please remove the SFP+ module and restart the system to clear this error.\n",
4850 " fault on %s Port %d has" 4849 params->port);
4851 " been detected and the"
4852 " power to that SFP+ module"
4853 " has been removed to prevent"
4854 " failure of the card. Please"
4855 " remove the SFP+ module and"
4856 " restart the system to clear"
4857 " this error.\n"
4858 , bp->dev->name, params->port);
4859 /* 4850 /*
4860 * Disable all RX_ALARMs except for 4851 * Disable all RX_ALARMs except for
4861 * mod_abs 4852 * mod_abs
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 306c2b8165e2..ed785a30e98b 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -1,6 +1,6 @@
1/* bnx2x_main.c: Broadcom Everest network driver. 1/* bnx2x_main.c: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2009 Broadcom Corporation 3 * Copyright (c) 2007-2010 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -57,8 +57,8 @@
57#include "bnx2x_init_ops.h" 57#include "bnx2x_init_ops.h"
58#include "bnx2x_dump.h" 58#include "bnx2x_dump.h"
59 59
60#define DRV_MODULE_VERSION "1.52.1-5" 60#define DRV_MODULE_VERSION "1.52.1-7"
61#define DRV_MODULE_RELDATE "2009/11/09" 61#define DRV_MODULE_RELDATE "2010/02/28"
62#define BNX2X_BC_VER 0x040200 62#define BNX2X_BC_VER 0x040200
63 63
64#include <linux/firmware.h> 64#include <linux/firmware.h>
@@ -140,7 +140,7 @@ static struct {
140}; 140};
141 141
142 142
143static const struct pci_device_id bnx2x_pci_tbl[] = { 143static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
144 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, 144 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
145 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, 145 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
146 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, 146 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
@@ -514,24 +514,24 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
514 514
515 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104); 515 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
516 mark = ((mark + 0x3) & ~0x3); 516 mark = ((mark + 0x3) & ~0x3);
517 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark); 517 pr_err("begin fw dump (mark 0x%x)\n", mark);
518 518
519 printk(KERN_ERR PFX); 519 pr_err("");
520 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) { 520 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
521 for (word = 0; word < 8; word++) 521 for (word = 0; word < 8; word++)
522 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH + 522 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
523 offset + 4*word)); 523 offset + 4*word));
524 data[8] = 0x0; 524 data[8] = 0x0;
525 printk(KERN_CONT "%s", (char *)data); 525 pr_cont("%s", (char *)data);
526 } 526 }
527 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) { 527 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
528 for (word = 0; word < 8; word++) 528 for (word = 0; word < 8; word++)
529 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH + 529 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
530 offset + 4*word)); 530 offset + 4*word));
531 data[8] = 0x0; 531 data[8] = 0x0;
532 printk(KERN_CONT "%s", (char *)data); 532 pr_cont("%s", (char *)data);
533 } 533 }
534 printk(KERN_ERR PFX "end of fw dump\n"); 534 pr_err("end of fw dump\n");
535} 535}
536 536
537static void bnx2x_panic_dump(struct bnx2x *bp) 537static void bnx2x_panic_dump(struct bnx2x *bp)
@@ -957,21 +957,34 @@ static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
957 fp->tx_pkt_cons = sw_cons; 957 fp->tx_pkt_cons = sw_cons;
958 fp->tx_bd_cons = bd_cons; 958 fp->tx_bd_cons = bd_cons;
959 959
960 /* Need to make the tx_bd_cons update visible to start_xmit()
961 * before checking for netif_tx_queue_stopped(). Without the
962 * memory barrier, there is a small possibility that
963 * start_xmit() will miss it and cause the queue to be stopped
964 * forever.
965 */
966 smp_wmb();
967
960 /* TBD need a thresh? */ 968 /* TBD need a thresh? */
961 if (unlikely(netif_tx_queue_stopped(txq))) { 969 if (unlikely(netif_tx_queue_stopped(txq))) {
962 970 /* Taking tx_lock() is needed to prevent reenabling the queue
963 /* Need to make the tx_bd_cons update visible to start_xmit() 971 * while it's empty. This could have happen if rx_action() gets
964 * before checking for netif_tx_queue_stopped(). Without the 972 * suspended in bnx2x_tx_int() after the condition before
965 * memory barrier, there is a small possibility that 973 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
966 * start_xmit() will miss it and cause the queue to be stopped 974 *
967 * forever. 975 * stops the queue->sees fresh tx_bd_cons->releases the queue->
976 * sends some packets consuming the whole queue again->
977 * stops the queue
968 */ 978 */
969 smp_mb(); 979
980 __netif_tx_lock(txq, smp_processor_id());
970 981
971 if ((netif_tx_queue_stopped(txq)) && 982 if ((netif_tx_queue_stopped(txq)) &&
972 (bp->state == BNX2X_STATE_OPEN) && 983 (bp->state == BNX2X_STATE_OPEN) &&
973 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)) 984 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
974 netif_tx_wake_queue(txq); 985 netif_tx_wake_queue(txq);
986
987 __netif_tx_unlock(txq);
975 } 988 }
976 return 0; 989 return 0;
977} 990}
@@ -2136,7 +2149,7 @@ static void bnx2x_link_report(struct bnx2x *bp)
2136{ 2149{
2137 if (bp->flags & MF_FUNC_DIS) { 2150 if (bp->flags & MF_FUNC_DIS) {
2138 netif_carrier_off(bp->dev); 2151 netif_carrier_off(bp->dev);
2139 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name); 2152 netdev_err(bp->dev, "NIC Link is Down\n");
2140 return; 2153 return;
2141 } 2154 }
2142 2155
@@ -2145,7 +2158,7 @@ static void bnx2x_link_report(struct bnx2x *bp)
2145 2158
2146 if (bp->state == BNX2X_STATE_OPEN) 2159 if (bp->state == BNX2X_STATE_OPEN)
2147 netif_carrier_on(bp->dev); 2160 netif_carrier_on(bp->dev);
2148 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name); 2161 netdev_info(bp->dev, "NIC Link is Up, ");
2149 2162
2150 line_speed = bp->link_vars.line_speed; 2163 line_speed = bp->link_vars.line_speed;
2151 if (IS_E1HMF(bp)) { 2164 if (IS_E1HMF(bp)) {
@@ -2157,29 +2170,29 @@ static void bnx2x_link_report(struct bnx2x *bp)
2157 if (vn_max_rate < line_speed) 2170 if (vn_max_rate < line_speed)
2158 line_speed = vn_max_rate; 2171 line_speed = vn_max_rate;
2159 } 2172 }
2160 printk("%d Mbps ", line_speed); 2173 pr_cont("%d Mbps ", line_speed);
2161 2174
2162 if (bp->link_vars.duplex == DUPLEX_FULL) 2175 if (bp->link_vars.duplex == DUPLEX_FULL)
2163 printk("full duplex"); 2176 pr_cont("full duplex");
2164 else 2177 else
2165 printk("half duplex"); 2178 pr_cont("half duplex");
2166 2179
2167 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) { 2180 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2168 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) { 2181 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2169 printk(", receive "); 2182 pr_cont(", receive ");
2170 if (bp->link_vars.flow_ctrl & 2183 if (bp->link_vars.flow_ctrl &
2171 BNX2X_FLOW_CTRL_TX) 2184 BNX2X_FLOW_CTRL_TX)
2172 printk("& transmit "); 2185 pr_cont("& transmit ");
2173 } else { 2186 } else {
2174 printk(", transmit "); 2187 pr_cont(", transmit ");
2175 } 2188 }
2176 printk("flow control ON"); 2189 pr_cont("flow control ON");
2177 } 2190 }
2178 printk("\n"); 2191 pr_cont("\n");
2179 2192
2180 } else { /* link_down */ 2193 } else { /* link_down */
2181 netif_carrier_off(bp->dev); 2194 netif_carrier_off(bp->dev);
2182 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name); 2195 netdev_err(bp->dev, "NIC Link is Down\n");
2183 } 2196 }
2184} 2197}
2185 2198
@@ -2898,10 +2911,8 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
2898 bp->link_params.ext_phy_config); 2911 bp->link_params.ext_phy_config);
2899 2912
2900 /* log the failure */ 2913 /* log the failure */
2901 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused" 2914 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
2902 " the driver to shutdown the card to prevent permanent" 2915 "Please contact Dell Support for assistance.\n");
2903 " damage. Please contact Dell Support for assistance\n",
2904 bp->dev->name);
2905} 2916}
2906 2917
2907static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) 2918static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -4296,7 +4307,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
4296 bnx2x_net_stats_update(bp); 4307 bnx2x_net_stats_update(bp);
4297 bnx2x_drv_stats_update(bp); 4308 bnx2x_drv_stats_update(bp);
4298 4309
4299 if (bp->msglevel & NETIF_MSG_TIMER) { 4310 if (netif_msg_timer(bp)) {
4300 struct bnx2x_fastpath *fp0_rx = bp->fp; 4311 struct bnx2x_fastpath *fp0_rx = bp->fp;
4301 struct bnx2x_fastpath *fp0_tx = bp->fp; 4312 struct bnx2x_fastpath *fp0_tx = bp->fp;
4302 struct tstorm_per_client_stats *old_tclient = 4313 struct tstorm_per_client_stats *old_tclient =
@@ -4306,7 +4317,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
4306 struct net_device_stats *nstats = &bp->dev->stats; 4317 struct net_device_stats *nstats = &bp->dev->stats;
4307 int i; 4318 int i;
4308 4319
4309 printk(KERN_DEBUG "%s:\n", bp->dev->name); 4320 netdev_printk(KERN_DEBUG, bp->dev, "\n");
4310 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)" 4321 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4311 " tx pkt (%lx)\n", 4322 " tx pkt (%lx)\n",
4312 bnx2x_tx_avail(fp0_tx), 4323 bnx2x_tx_avail(fp0_tx),
@@ -4464,7 +4475,7 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4464 /* Make sure the state has been "changed" */ 4475 /* Make sure the state has been "changed" */
4465 smp_wmb(); 4476 smp_wmb();
4466 4477
4467 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER)) 4478 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4468 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", 4479 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4469 state, event, bp->stats_state); 4480 state, event, bp->stats_state);
4470} 4481}
@@ -5674,8 +5685,7 @@ gunzip_nomem2:
5674 bp->gunzip_buf = NULL; 5685 bp->gunzip_buf = NULL;
5675 5686
5676gunzip_nomem1: 5687gunzip_nomem1:
5677 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for" 5688 netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n");
5678 " un-compression\n", bp->dev->name);
5679 return -ENOMEM; 5689 return -ENOMEM;
5680} 5690}
5681 5691
@@ -5721,14 +5731,13 @@ static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5721 5731
5722 rc = zlib_inflate(bp->strm, Z_FINISH); 5732 rc = zlib_inflate(bp->strm, Z_FINISH);
5723 if ((rc != Z_OK) && (rc != Z_STREAM_END)) 5733 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5724 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n", 5734 netdev_err(bp->dev, "Firmware decompression error: %s\n",
5725 bp->dev->name, bp->strm->msg); 5735 bp->strm->msg);
5726 5736
5727 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); 5737 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5728 if (bp->gunzip_outlen & 0x3) 5738 if (bp->gunzip_outlen & 0x3)
5729 printk(KERN_ERR PFX "%s: Firmware decompression error:" 5739 netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
5730 " gunzip_outlen (%d) not aligned\n", 5740 bp->gunzip_outlen);
5731 bp->dev->name, bp->gunzip_outlen);
5732 bp->gunzip_outlen >>= 2; 5741 bp->gunzip_outlen >>= 2;
5733 5742
5734 zlib_inflateEnd(bp->strm); 5743 zlib_inflateEnd(bp->strm);
@@ -6213,8 +6222,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
6213 6222
6214 if (sizeof(union cdu_context) != 1024) 6223 if (sizeof(union cdu_context) != 1024)
6215 /* we currently assume that a context is 1024 bytes */ 6224 /* we currently assume that a context is 1024 bytes */
6216 printk(KERN_ALERT PFX "please adjust the size of" 6225 pr_alert("please adjust the size of cdu_context(%ld)\n",
6217 " cdu_context(%ld)\n", (long)sizeof(union cdu_context)); 6226 (long)sizeof(union cdu_context));
6218 6227
6219 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE); 6228 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6220 val = (4 << 24) + (0 << 12) + 1024; 6229 val = (4 << 24) + (0 << 12) + 1024;
@@ -6938,19 +6947,21 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6938 } 6947 }
6939} 6948}
6940 6949
6941static void bnx2x_free_irq(struct bnx2x *bp) 6950static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
6942{ 6951{
6943 if (bp->flags & USING_MSIX_FLAG) { 6952 if (bp->flags & USING_MSIX_FLAG) {
6944 bnx2x_free_msix_irqs(bp); 6953 if (!disable_only)
6954 bnx2x_free_msix_irqs(bp);
6945 pci_disable_msix(bp->pdev); 6955 pci_disable_msix(bp->pdev);
6946 bp->flags &= ~USING_MSIX_FLAG; 6956 bp->flags &= ~USING_MSIX_FLAG;
6947 6957
6948 } else if (bp->flags & USING_MSI_FLAG) { 6958 } else if (bp->flags & USING_MSI_FLAG) {
6949 free_irq(bp->pdev->irq, bp->dev); 6959 if (!disable_only)
6960 free_irq(bp->pdev->irq, bp->dev);
6950 pci_disable_msi(bp->pdev); 6961 pci_disable_msi(bp->pdev);
6951 bp->flags &= ~USING_MSI_FLAG; 6962 bp->flags &= ~USING_MSI_FLAG;
6952 6963
6953 } else 6964 } else if (!disable_only)
6954 free_irq(bp->pdev->irq, bp->dev); 6965 free_irq(bp->pdev->irq, bp->dev);
6955} 6966}
6956 6967
@@ -7018,11 +7029,10 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7018 } 7029 }
7019 7030
7020 i = BNX2X_NUM_QUEUES(bp); 7031 i = BNX2X_NUM_QUEUES(bp);
7021 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d" 7032 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
7022 " ... fp[%d] %d\n", 7033 bp->msix_table[0].vector,
7023 bp->dev->name, bp->msix_table[0].vector, 7034 0, bp->msix_table[offset].vector,
7024 0, bp->msix_table[offset].vector, 7035 i - 1, bp->msix_table[offset + i - 1].vector);
7025 i - 1, bp->msix_table[offset + i - 1].vector);
7026 7036
7027 return 0; 7037 return 0;
7028} 7038}
@@ -7443,8 +7453,10 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7443 7453
7444 rc = bnx2x_set_num_queues(bp); 7454 rc = bnx2x_set_num_queues(bp);
7445 7455
7446 if (bnx2x_alloc_mem(bp)) 7456 if (bnx2x_alloc_mem(bp)) {
7457 bnx2x_free_irq(bp, true);
7447 return -ENOMEM; 7458 return -ENOMEM;
7459 }
7448 7460
7449 for_each_queue(bp, i) 7461 for_each_queue(bp, i)
7450 bnx2x_fp(bp, i, disable_tpa) = 7462 bnx2x_fp(bp, i, disable_tpa) =
@@ -7459,7 +7471,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7459 if (bp->flags & USING_MSIX_FLAG) { 7471 if (bp->flags & USING_MSIX_FLAG) {
7460 rc = bnx2x_req_msix_irqs(bp); 7472 rc = bnx2x_req_msix_irqs(bp);
7461 if (rc) { 7473 if (rc) {
7462 pci_disable_msix(bp->pdev); 7474 bnx2x_free_irq(bp, true);
7463 goto load_error1; 7475 goto load_error1;
7464 } 7476 }
7465 } else { 7477 } else {
@@ -7471,14 +7483,13 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7471 rc = bnx2x_req_irq(bp); 7483 rc = bnx2x_req_irq(bp);
7472 if (rc) { 7484 if (rc) {
7473 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); 7485 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7474 if (bp->flags & USING_MSI_FLAG) 7486 bnx2x_free_irq(bp, true);
7475 pci_disable_msi(bp->pdev);
7476 goto load_error1; 7487 goto load_error1;
7477 } 7488 }
7478 if (bp->flags & USING_MSI_FLAG) { 7489 if (bp->flags & USING_MSI_FLAG) {
7479 bp->dev->irq = bp->pdev->irq; 7490 bp->dev->irq = bp->pdev->irq;
7480 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n", 7491 netdev_info(bp->dev, "using MSI IRQ %d\n",
7481 bp->dev->name, bp->pdev->irq); 7492 bp->pdev->irq);
7482 } 7493 }
7483 } 7494 }
7484 7495
@@ -7527,6 +7538,9 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7527 rc = bnx2x_init_hw(bp, load_code); 7538 rc = bnx2x_init_hw(bp, load_code);
7528 if (rc) { 7539 if (rc) {
7529 BNX2X_ERR("HW init failed, aborting\n"); 7540 BNX2X_ERR("HW init failed, aborting\n");
7541 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7542 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7543 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7530 goto load_error2; 7544 goto load_error2;
7531 } 7545 }
7532 7546
@@ -7664,7 +7678,7 @@ load_error3:
7664 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 7678 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7665load_error2: 7679load_error2:
7666 /* Release IRQs */ 7680 /* Release IRQs */
7667 bnx2x_free_irq(bp); 7681 bnx2x_free_irq(bp, false);
7668load_error1: 7682load_error1:
7669 bnx2x_napi_disable(bp); 7683 bnx2x_napi_disable(bp);
7670 for_each_queue(bp, i) 7684 for_each_queue(bp, i)
@@ -7855,7 +7869,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7855 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 7869 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7856 7870
7857 /* Release IRQs */ 7871 /* Release IRQs */
7858 bnx2x_free_irq(bp); 7872 bnx2x_free_irq(bp, false);
7859 7873
7860 /* Wait until tx fastpath tasks complete */ 7874 /* Wait until tx fastpath tasks complete */
7861 for_each_queue(bp, i) { 7875 for_each_queue(bp, i) {
@@ -8297,8 +8311,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8297 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); 8311 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8298 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); 8312 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8299 8313
8300 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n", 8314 pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
8301 val, val2, val3, val4);
8302} 8315}
8303 8316
8304static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, 8317static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
@@ -8909,17 +8922,15 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8909 bnx2x_undi_unload(bp); 8922 bnx2x_undi_unload(bp);
8910 8923
8911 if (CHIP_REV_IS_FPGA(bp)) 8924 if (CHIP_REV_IS_FPGA(bp))
8912 printk(KERN_ERR PFX "FPGA detected\n"); 8925 pr_err("FPGA detected\n");
8913 8926
8914 if (BP_NOMCP(bp) && (func == 0)) 8927 if (BP_NOMCP(bp) && (func == 0))
8915 printk(KERN_ERR PFX 8928 pr_err("MCP disabled, must load devices in order!\n");
8916 "MCP disabled, must load devices in order!\n");
8917 8929
8918 /* Set multi queue mode */ 8930 /* Set multi queue mode */
8919 if ((multi_mode != ETH_RSS_MODE_DISABLED) && 8931 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8920 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) { 8932 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8921 printk(KERN_ERR PFX 8933 pr_err("Multi disabled since int_mode requested is not MSI-X\n");
8922 "Multi disabled since int_mode requested is not MSI-X\n");
8923 multi_mode = ETH_RSS_MODE_DISABLED; 8934 multi_mode = ETH_RSS_MODE_DISABLED;
8924 } 8935 }
8925 bp->multi_mode = multi_mode; 8936 bp->multi_mode = multi_mode;
@@ -9345,7 +9356,7 @@ static u32 bnx2x_get_msglevel(struct net_device *dev)
9345{ 9356{
9346 struct bnx2x *bp = netdev_priv(dev); 9357 struct bnx2x *bp = netdev_priv(dev);
9347 9358
9348 return bp->msglevel; 9359 return bp->msg_enable;
9349} 9360}
9350 9361
9351static void bnx2x_set_msglevel(struct net_device *dev, u32 level) 9362static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
@@ -9353,7 +9364,7 @@ static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9353 struct bnx2x *bp = netdev_priv(dev); 9364 struct bnx2x *bp = netdev_priv(dev);
9354 9365
9355 if (capable(CAP_NET_ADMIN)) 9366 if (capable(CAP_NET_ADMIN))
9356 bp->msglevel = level; 9367 bp->msg_enable = level;
9357} 9368}
9358 9369
9359static int bnx2x_nway_reset(struct net_device *dev) 9370static int bnx2x_nway_reset(struct net_device *dev)
@@ -9962,12 +9973,14 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
9962 9973
9963 /* TPA requires Rx CSUM offloading */ 9974 /* TPA requires Rx CSUM offloading */
9964 if ((data & ETH_FLAG_LRO) && bp->rx_csum) { 9975 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9965 if (!(dev->features & NETIF_F_LRO)) { 9976 if (!disable_tpa) {
9966 dev->features |= NETIF_F_LRO; 9977 if (!(dev->features & NETIF_F_LRO)) {
9967 bp->flags |= TPA_ENABLE_FLAG; 9978 dev->features |= NETIF_F_LRO;
9968 changed = 1; 9979 bp->flags |= TPA_ENABLE_FLAG;
9969 } 9980 changed = 1;
9970 9981 }
9982 } else
9983 rc = -EINVAL;
9971 } else if (dev->features & NETIF_F_LRO) { 9984 } else if (dev->features & NETIF_F_LRO) {
9972 dev->features &= ~NETIF_F_LRO; 9985 dev->features &= ~NETIF_F_LRO;
9973 bp->flags &= ~TPA_ENABLE_FLAG; 9986 bp->flags &= ~TPA_ENABLE_FLAG;
@@ -10425,7 +10438,8 @@ static int bnx2x_test_intr(struct bnx2x *bp)
10425 10438
10426 config->hdr.length = 0; 10439 config->hdr.length = 0;
10427 if (CHIP_IS_E1(bp)) 10440 if (CHIP_IS_E1(bp))
10428 config->hdr.offset = (BP_PORT(bp) ? 32 : 0); 10441 /* use last unicast entries */
10442 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
10429 else 10443 else
10430 config->hdr.offset = BP_FUNC(bp); 10444 config->hdr.offset = BP_FUNC(bp);
10431 config->hdr.client_id = bp->fp->cl_id; 10445 config->hdr.client_id = bp->fp->cl_id;
@@ -10644,7 +10658,7 @@ static const struct {
10644 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT) 10658 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10645#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC) 10659#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10646#define IS_E1HMF_MODE_STAT(bp) \ 10660#define IS_E1HMF_MODE_STAT(bp) \
10647 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS)) 10661 (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
10648 10662
10649static int bnx2x_get_sset_count(struct net_device *dev, int stringset) 10663static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10650{ 10664{
@@ -11471,7 +11485,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
11471 rx_mode = BNX2X_RX_MODE_PROMISC; 11485 rx_mode = BNX2X_RX_MODE_PROMISC;
11472 11486
11473 else if ((dev->flags & IFF_ALLMULTI) || 11487 else if ((dev->flags & IFF_ALLMULTI) ||
11474 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp))) 11488 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
11489 CHIP_IS_E1(bp)))
11475 rx_mode = BNX2X_RX_MODE_ALLMULTI; 11490 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11476 11491
11477 else { /* some multicasts */ 11492 else { /* some multicasts */
@@ -11481,10 +11496,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
11481 struct mac_configuration_cmd *config = 11496 struct mac_configuration_cmd *config =
11482 bnx2x_sp(bp, mcast_config); 11497 bnx2x_sp(bp, mcast_config);
11483 11498
11484 for (i = 0, mclist = dev->mc_list; 11499 i = 0;
11485 mclist && (i < dev->mc_count); 11500 netdev_for_each_mc_addr(mclist, dev) {
11486 i++, mclist = mclist->next) {
11487
11488 config->config_table[i]. 11501 config->config_table[i].
11489 cam_entry.msb_mac_addr = 11502 cam_entry.msb_mac_addr =
11490 swab16(*(u16 *)&mclist->dmi_addr[0]); 11503 swab16(*(u16 *)&mclist->dmi_addr[0]);
@@ -11512,6 +11525,7 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
11512 cam_entry.middle_mac_addr, 11525 cam_entry.middle_mac_addr,
11513 config->config_table[i]. 11526 config->config_table[i].
11514 cam_entry.lsb_mac_addr); 11527 cam_entry.lsb_mac_addr);
11528 i++;
11515 } 11529 }
11516 old = config->hdr.length; 11530 old = config->hdr.length;
11517 if (old > i) { 11531 if (old > i) {
@@ -11553,10 +11567,7 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
11553 11567
11554 memset(mc_filter, 0, 4 * MC_HASH_SIZE); 11568 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11555 11569
11556 for (i = 0, mclist = dev->mc_list; 11570 netdev_for_each_mc_addr(mclist, dev) {
11557 mclist && (i < dev->mc_count);
11558 i++, mclist = mclist->next) {
11559
11560 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n", 11571 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11561 mclist->dmi_addr); 11572 mclist->dmi_addr);
11562 11573
@@ -11731,7 +11742,7 @@ static void bnx2x_vlan_rx_register(struct net_device *dev,
11731 11742
11732#endif 11743#endif
11733 11744
11734#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) 11745#ifdef CONFIG_NET_POLL_CONTROLLER
11735static void poll_bnx2x(struct net_device *dev) 11746static void poll_bnx2x(struct net_device *dev)
11736{ 11747{
11737 struct bnx2x *bp = netdev_priv(dev); 11748 struct bnx2x *bp = netdev_priv(dev);
@@ -11755,7 +11766,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
11755#ifdef BCM_VLAN 11766#ifdef BCM_VLAN
11756 .ndo_vlan_rx_register = bnx2x_vlan_rx_register, 11767 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11757#endif 11768#endif
11758#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) 11769#ifdef CONFIG_NET_POLL_CONTROLLER
11759 .ndo_poll_controller = poll_bnx2x, 11770 .ndo_poll_controller = poll_bnx2x,
11760#endif 11771#endif
11761}; 11772};
@@ -11776,20 +11787,18 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11776 11787
11777 rc = pci_enable_device(pdev); 11788 rc = pci_enable_device(pdev);
11778 if (rc) { 11789 if (rc) {
11779 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n"); 11790 pr_err("Cannot enable PCI device, aborting\n");
11780 goto err_out; 11791 goto err_out;
11781 } 11792 }
11782 11793
11783 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 11794 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11784 printk(KERN_ERR PFX "Cannot find PCI device base address," 11795 pr_err("Cannot find PCI device base address, aborting\n");
11785 " aborting\n");
11786 rc = -ENODEV; 11796 rc = -ENODEV;
11787 goto err_out_disable; 11797 goto err_out_disable;
11788 } 11798 }
11789 11799
11790 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 11800 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11791 printk(KERN_ERR PFX "Cannot find second PCI device" 11801 pr_err("Cannot find second PCI device base address, aborting\n");
11792 " base address, aborting\n");
11793 rc = -ENODEV; 11802 rc = -ENODEV;
11794 goto err_out_disable; 11803 goto err_out_disable;
11795 } 11804 }
@@ -11797,8 +11806,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11797 if (atomic_read(&pdev->enable_cnt) == 1) { 11806 if (atomic_read(&pdev->enable_cnt) == 1) {
11798 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 11807 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11799 if (rc) { 11808 if (rc) {
11800 printk(KERN_ERR PFX "Cannot obtain PCI resources," 11809 pr_err("Cannot obtain PCI resources, aborting\n");
11801 " aborting\n");
11802 goto err_out_disable; 11810 goto err_out_disable;
11803 } 11811 }
11804 11812
@@ -11808,16 +11816,14 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11808 11816
11809 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 11817 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11810 if (bp->pm_cap == 0) { 11818 if (bp->pm_cap == 0) {
11811 printk(KERN_ERR PFX "Cannot find power management" 11819 pr_err("Cannot find power management capability, aborting\n");
11812 " capability, aborting\n");
11813 rc = -EIO; 11820 rc = -EIO;
11814 goto err_out_release; 11821 goto err_out_release;
11815 } 11822 }
11816 11823
11817 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); 11824 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11818 if (bp->pcie_cap == 0) { 11825 if (bp->pcie_cap == 0) {
11819 printk(KERN_ERR PFX "Cannot find PCI Express capability," 11826 pr_err("Cannot find PCI Express capability, aborting\n");
11820 " aborting\n");
11821 rc = -EIO; 11827 rc = -EIO;
11822 goto err_out_release; 11828 goto err_out_release;
11823 } 11829 }
@@ -11825,15 +11831,13 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11825 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { 11831 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11826 bp->flags |= USING_DAC_FLAG; 11832 bp->flags |= USING_DAC_FLAG;
11827 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { 11833 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11828 printk(KERN_ERR PFX "pci_set_consistent_dma_mask" 11834 pr_err("pci_set_consistent_dma_mask failed, aborting\n");
11829 " failed, aborting\n");
11830 rc = -EIO; 11835 rc = -EIO;
11831 goto err_out_release; 11836 goto err_out_release;
11832 } 11837 }
11833 11838
11834 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { 11839 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11835 printk(KERN_ERR PFX "System does not support DMA," 11840 pr_err("System does not support DMA, aborting\n");
11836 " aborting\n");
11837 rc = -EIO; 11841 rc = -EIO;
11838 goto err_out_release; 11842 goto err_out_release;
11839 } 11843 }
@@ -11846,7 +11850,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11846 11850
11847 bp->regview = pci_ioremap_bar(pdev, 0); 11851 bp->regview = pci_ioremap_bar(pdev, 0);
11848 if (!bp->regview) { 11852 if (!bp->regview) {
11849 printk(KERN_ERR PFX "Cannot map register space, aborting\n"); 11853 pr_err("Cannot map register space, aborting\n");
11850 rc = -ENOMEM; 11854 rc = -ENOMEM;
11851 goto err_out_release; 11855 goto err_out_release;
11852 } 11856 }
@@ -11855,7 +11859,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11855 min_t(u64, BNX2X_DB_SIZE, 11859 min_t(u64, BNX2X_DB_SIZE,
11856 pci_resource_len(pdev, 2))); 11860 pci_resource_len(pdev, 2)));
11857 if (!bp->doorbells) { 11861 if (!bp->doorbells) {
11858 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n"); 11862 pr_err("Cannot map doorbell space, aborting\n");
11859 rc = -ENOMEM; 11863 rc = -ENOMEM;
11860 goto err_out_unmap; 11864 goto err_out_unmap;
11861 } 11865 }
@@ -11957,8 +11961,7 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11957 offset = be32_to_cpu(sections[i].offset); 11961 offset = be32_to_cpu(sections[i].offset);
11958 len = be32_to_cpu(sections[i].len); 11962 len = be32_to_cpu(sections[i].len);
11959 if (offset + len > firmware->size) { 11963 if (offset + len > firmware->size) {
11960 printk(KERN_ERR PFX "Section %d length is out of " 11964 pr_err("Section %d length is out of bounds\n", i);
11961 "bounds\n", i);
11962 return -EINVAL; 11965 return -EINVAL;
11963 } 11966 }
11964 } 11967 }
@@ -11970,8 +11973,7 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11970 11973
11971 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) { 11974 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11972 if (be16_to_cpu(ops_offsets[i]) > num_ops) { 11975 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11973 printk(KERN_ERR PFX "Section offset %d is out of " 11976 pr_err("Section offset %d is out of bounds\n", i);
11974 "bounds\n", i);
11975 return -EINVAL; 11977 return -EINVAL;
11976 } 11978 }
11977 } 11979 }
@@ -11983,8 +11985,7 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11983 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) || 11985 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11984 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) || 11986 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11985 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) { 11987 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11986 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d." 11988 pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
11987 " Should be %d.%d.%d.%d\n",
11988 fw_ver[0], fw_ver[1], fw_ver[2], 11989 fw_ver[0], fw_ver[1], fw_ver[2],
11989 fw_ver[3], BCM_5710_FW_MAJOR_VERSION, 11990 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11990 BCM_5710_FW_MINOR_VERSION, 11991 BCM_5710_FW_MINOR_VERSION,
@@ -12034,18 +12035,17 @@ static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12034 target[i] = be16_to_cpu(source[i]); 12035 target[i] = be16_to_cpu(source[i]);
12035} 12036}
12036 12037
12037#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \ 12038#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
12038 do { \ 12039do { \
12039 u32 len = be32_to_cpu(fw_hdr->arr.len); \ 12040 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12040 bp->arr = kmalloc(len, GFP_KERNEL); \ 12041 bp->arr = kmalloc(len, GFP_KERNEL); \
12041 if (!bp->arr) { \ 12042 if (!bp->arr) { \
12042 printk(KERN_ERR PFX "Failed to allocate %d bytes " \ 12043 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
12043 "for "#arr"\n", len); \ 12044 goto lbl; \
12044 goto lbl; \ 12045 } \
12045 } \ 12046 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12046 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \ 12047 (u8 *)bp->arr, len); \
12047 (u8 *)bp->arr, len); \ 12048} while (0)
12048 } while (0)
12049 12049
12050static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev) 12050static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12051{ 12051{
@@ -12058,18 +12058,17 @@ static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12058 else 12058 else
12059 fw_file_name = FW_FILE_NAME_E1H; 12059 fw_file_name = FW_FILE_NAME_E1H;
12060 12060
12061 printk(KERN_INFO PFX "Loading %s\n", fw_file_name); 12061 pr_info("Loading %s\n", fw_file_name);
12062 12062
12063 rc = request_firmware(&bp->firmware, fw_file_name, dev); 12063 rc = request_firmware(&bp->firmware, fw_file_name, dev);
12064 if (rc) { 12064 if (rc) {
12065 printk(KERN_ERR PFX "Can't load firmware file %s\n", 12065 pr_err("Can't load firmware file %s\n", fw_file_name);
12066 fw_file_name);
12067 goto request_firmware_exit; 12066 goto request_firmware_exit;
12068 } 12067 }
12069 12068
12070 rc = bnx2x_check_firmware(bp); 12069 rc = bnx2x_check_firmware(bp);
12071 if (rc) { 12070 if (rc) {
12072 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name); 12071 pr_err("Corrupt firmware file %s\n", fw_file_name);
12073 goto request_firmware_exit; 12072 goto request_firmware_exit;
12074 } 12073 }
12075 12074
@@ -12128,12 +12127,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12128 /* dev zeroed in init_etherdev */ 12127 /* dev zeroed in init_etherdev */
12129 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT); 12128 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
12130 if (!dev) { 12129 if (!dev) {
12131 printk(KERN_ERR PFX "Cannot allocate net device\n"); 12130 pr_err("Cannot allocate net device\n");
12132 return -ENOMEM; 12131 return -ENOMEM;
12133 } 12132 }
12134 12133
12135 bp = netdev_priv(dev); 12134 bp = netdev_priv(dev);
12136 bp->msglevel = debug; 12135 bp->msg_enable = debug;
12137 12136
12138 pci_set_drvdata(pdev, dev); 12137 pci_set_drvdata(pdev, dev);
12139 12138
@@ -12150,7 +12149,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12150 /* Set init arrays */ 12149 /* Set init arrays */
12151 rc = bnx2x_init_firmware(bp, &pdev->dev); 12150 rc = bnx2x_init_firmware(bp, &pdev->dev);
12152 if (rc) { 12151 if (rc) {
12153 printk(KERN_ERR PFX "Error loading firmware\n"); 12152 pr_err("Error loading firmware\n");
12154 goto init_one_exit; 12153 goto init_one_exit;
12155 } 12154 }
12156 12155
@@ -12161,12 +12160,11 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12161 } 12160 }
12162 12161
12163 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); 12162 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
12164 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx," 12163 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
12165 " IRQ %d, ", dev->name, board_info[ent->driver_data].name, 12164 board_info[ent->driver_data].name,
12166 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), 12165 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12167 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz", 12166 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
12168 dev->base_addr, bp->pdev->irq); 12167 dev->base_addr, bp->pdev->irq, dev->dev_addr);
12169 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
12170 12168
12171 return 0; 12169 return 0;
12172 12170
@@ -12194,7 +12192,7 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12194 struct bnx2x *bp; 12192 struct bnx2x *bp;
12195 12193
12196 if (!dev) { 12194 if (!dev) {
12197 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n"); 12195 pr_err("BAD net device from bnx2x_init_one\n");
12198 return; 12196 return;
12199 } 12197 }
12200 bp = netdev_priv(dev); 12198 bp = netdev_priv(dev);
@@ -12227,7 +12225,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
12227 struct bnx2x *bp; 12225 struct bnx2x *bp;
12228 12226
12229 if (!dev) { 12227 if (!dev) {
12230 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n"); 12228 pr_err("BAD net device from bnx2x_init_one\n");
12231 return -ENODEV; 12229 return -ENODEV;
12232 } 12230 }
12233 bp = netdev_priv(dev); 12231 bp = netdev_priv(dev);
@@ -12259,7 +12257,7 @@ static int bnx2x_resume(struct pci_dev *pdev)
12259 int rc; 12257 int rc;
12260 12258
12261 if (!dev) { 12259 if (!dev) {
12262 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n"); 12260 pr_err("BAD net device from bnx2x_init_one\n");
12263 return -ENODEV; 12261 return -ENODEV;
12264 } 12262 }
12265 bp = netdev_priv(dev); 12263 bp = netdev_priv(dev);
@@ -12298,7 +12296,7 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12298 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); 12296 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12299 12297
12300 /* Release IRQs */ 12298 /* Release IRQs */
12301 bnx2x_free_irq(bp); 12299 bnx2x_free_irq(bp, false);
12302 12300
12303 if (CHIP_IS_E1(bp)) { 12301 if (CHIP_IS_E1(bp)) {
12304 struct mac_configuration_cmd *config = 12302 struct mac_configuration_cmd *config =
@@ -12462,17 +12460,17 @@ static int __init bnx2x_init(void)
12462{ 12460{
12463 int ret; 12461 int ret;
12464 12462
12465 printk(KERN_INFO "%s", version); 12463 pr_info("%s", version);
12466 12464
12467 bnx2x_wq = create_singlethread_workqueue("bnx2x"); 12465 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12468 if (bnx2x_wq == NULL) { 12466 if (bnx2x_wq == NULL) {
12469 printk(KERN_ERR PFX "Cannot create workqueue\n"); 12467 pr_err("Cannot create workqueue\n");
12470 return -ENOMEM; 12468 return -ENOMEM;
12471 } 12469 }
12472 12470
12473 ret = pci_register_driver(&bnx2x_pci_driver); 12471 ret = pci_register_driver(&bnx2x_pci_driver);
12474 if (ret) { 12472 if (ret) {
12475 printk(KERN_ERR PFX "Cannot register driver\n"); 12473 pr_err("Cannot register driver\n");
12476 destroy_workqueue(bnx2x_wq); 12474 destroy_workqueue(bnx2x_wq);
12477 } 12475 }
12478 return ret; 12476 return ret;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index efa0e41bf3ec..430c02267d7e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2615,6 +2615,17 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
2615 unsigned char *arp_ptr; 2615 unsigned char *arp_ptr;
2616 __be32 sip, tip; 2616 __be32 sip, tip;
2617 2617
2618 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2619 /*
2620 * When using VLANS and bonding, dev and oriv_dev may be
2621 * incorrect if the physical interface supports VLAN
2622 * acceleration. With this change ARP validation now
2623 * works for hosts only reachable on the VLAN interface.
2624 */
2625 dev = vlan_dev_real_dev(dev);
2626 orig_dev = dev_get_by_index_rcu(dev_net(skb->dev),skb->skb_iif);
2627 }
2628
2618 if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER)) 2629 if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER))
2619 goto out; 2630 goto out;
2620 2631
@@ -3296,7 +3307,7 @@ static void bond_remove_proc_entry(struct bonding *bond)
3296/* Create the bonding directory under /proc/net, if doesn't exist yet. 3307/* Create the bonding directory under /proc/net, if doesn't exist yet.
3297 * Caller must hold rtnl_lock. 3308 * Caller must hold rtnl_lock.
3298 */ 3309 */
3299static void bond_create_proc_dir(struct bond_net *bn) 3310static void __net_init bond_create_proc_dir(struct bond_net *bn)
3300{ 3311{
3301 if (!bn->proc_dir) { 3312 if (!bn->proc_dir) {
3302 bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net); 3313 bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
@@ -3309,7 +3320,7 @@ static void bond_create_proc_dir(struct bond_net *bn)
3309/* Destroy the bonding directory under /proc/net, if empty. 3320/* Destroy the bonding directory under /proc/net, if empty.
3310 * Caller must hold rtnl_lock. 3321 * Caller must hold rtnl_lock.
3311 */ 3322 */
3312static void bond_destroy_proc_dir(struct bond_net *bn) 3323static void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
3313{ 3324{
3314 if (bn->proc_dir) { 3325 if (bn->proc_dir) {
3315 remove_proc_entry(DRV_NAME, bn->net->proc_net); 3326 remove_proc_entry(DRV_NAME, bn->net->proc_net);
@@ -3327,11 +3338,11 @@ static void bond_remove_proc_entry(struct bonding *bond)
3327{ 3338{
3328} 3339}
3329 3340
3330static void bond_create_proc_dir(struct bond_net *bn) 3341static inline void bond_create_proc_dir(struct bond_net *bn)
3331{ 3342{
3332} 3343}
3333 3344
3334static void bond_destroy_proc_dir(struct bond_net *bn) 3345static inline void bond_destroy_proc_dir(struct bond_net *bn)
3335{ 3346{
3336} 3347}
3337 3348
@@ -3731,7 +3742,7 @@ static int bond_close(struct net_device *bond_dev)
3731static struct net_device_stats *bond_get_stats(struct net_device *bond_dev) 3742static struct net_device_stats *bond_get_stats(struct net_device *bond_dev)
3732{ 3743{
3733 struct bonding *bond = netdev_priv(bond_dev); 3744 struct bonding *bond = netdev_priv(bond_dev);
3734 struct net_device_stats *stats = &bond->stats; 3745 struct net_device_stats *stats = &bond_dev->stats;
3735 struct net_device_stats local_stats; 3746 struct net_device_stats local_stats;
3736 struct slave *slave; 3747 struct slave *slave;
3737 int i; 3748 int i;
@@ -4935,6 +4946,8 @@ int bond_create(struct net *net, const char *name)
4935 } 4946 }
4936 4947
4937 res = register_netdevice(bond_dev); 4948 res = register_netdevice(bond_dev);
4949 if (res < 0)
4950 goto out_netdev;
4938 4951
4939out: 4952out:
4940 rtnl_unlock(); 4953 rtnl_unlock();
@@ -4944,7 +4957,7 @@ out_netdev:
4944 goto out; 4957 goto out;
4945} 4958}
4946 4959
4947static int bond_net_init(struct net *net) 4960static int __net_init bond_net_init(struct net *net)
4948{ 4961{
4949 struct bond_net *bn = net_generic(net, bond_net_id); 4962 struct bond_net *bn = net_generic(net, bond_net_id);
4950 4963
@@ -4956,7 +4969,7 @@ static int bond_net_init(struct net *net)
4956 return 0; 4969 return 0;
4957} 4970}
4958 4971
4959static void bond_net_exit(struct net *net) 4972static void __net_exit bond_net_exit(struct net *net)
4960{ 4973{
4961 struct bond_net *bn = net_generic(net, bond_net_id); 4974 struct bond_net *bn = net_generic(net, bond_net_id);
4962 4975
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 558ec1352527..257a7a4dfce9 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -197,7 +197,6 @@ struct bonding {
197 s8 send_grat_arp; 197 s8 send_grat_arp;
198 s8 send_unsol_na; 198 s8 send_unsol_na;
199 s8 setup_by_slave; 199 s8 setup_by_slave;
200 struct net_device_stats stats;
201#ifdef CONFIG_PROC_FS 200#ifdef CONFIG_PROC_FS
202 struct proc_dir_entry *proc_entry; 201 struct proc_dir_entry *proc_entry;
203 char proc_file_name[IFNAMSIZ]; 202 char proc_file_name[IFNAMSIZ];
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 166cc7e579c0..a2f29a38798a 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -342,6 +342,9 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
342 unsigned int mb, prio; 342 unsigned int mb, prio;
343 u32 reg_mid, reg_mcr; 343 u32 reg_mid, reg_mcr;
344 344
345 if (can_dropped_invalid_skb(dev, skb))
346 return NETDEV_TX_OK;
347
345 mb = get_tx_next_mb(priv); 348 mb = get_tx_next_mb(priv);
346 prio = get_tx_next_prio(priv); 349 prio = get_tx_next_prio(priv);
347 350
@@ -1070,6 +1073,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
1070 priv->can.bittiming_const = &at91_bittiming_const; 1073 priv->can.bittiming_const = &at91_bittiming_const;
1071 priv->can.do_set_bittiming = at91_set_bittiming; 1074 priv->can.do_set_bittiming = at91_set_bittiming;
1072 priv->can.do_set_mode = at91_set_mode; 1075 priv->can.do_set_mode = at91_set_mode;
1076 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
1073 priv->reg_base = addr; 1077 priv->reg_base = addr;
1074 priv->dev = dev; 1078 priv->dev = dev;
1075 priv->clk = clk; 1079 priv->clk = clk;
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 0ec1524523cc..bf7f9ba2d903 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -318,6 +318,9 @@ static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
318 u16 val; 318 u16 val;
319 int i; 319 int i;
320 320
321 if (can_dropped_invalid_skb(dev, skb))
322 return NETDEV_TX_OK;
323
321 netif_stop_queue(dev); 324 netif_stop_queue(dev);
322 325
323 /* fill id */ 326 /* fill id */
@@ -600,6 +603,7 @@ struct net_device *alloc_bfin_candev(void)
600 priv->can.bittiming_const = &bfin_can_bittiming_const; 603 priv->can.bittiming_const = &bfin_can_bittiming_const;
601 priv->can.do_set_bittiming = bfin_can_set_bittiming; 604 priv->can.do_set_bittiming = bfin_can_set_bittiming;
602 priv->can.do_set_mode = bfin_can_set_mode; 605 priv->can.do_set_mode = bfin_can_set_mode;
606 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
603 607
604 return dev; 608 return dev;
605} 609}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index c1bb29f0322b..904aa369f80e 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -574,6 +574,7 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
574 [IFLA_CAN_BITTIMING_CONST] 574 [IFLA_CAN_BITTIMING_CONST]
575 = { .len = sizeof(struct can_bittiming_const) }, 575 = { .len = sizeof(struct can_bittiming_const) },
576 [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) }, 576 [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
577 [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
577}; 578};
578 579
579static int can_changelink(struct net_device *dev, 580static int can_changelink(struct net_device *dev,
@@ -592,6 +593,8 @@ static int can_changelink(struct net_device *dev,
592 if (dev->flags & IFF_UP) 593 if (dev->flags & IFF_UP)
593 return -EBUSY; 594 return -EBUSY;
594 cm = nla_data(data[IFLA_CAN_CTRLMODE]); 595 cm = nla_data(data[IFLA_CAN_CTRLMODE]);
596 if (cm->flags & ~priv->ctrlmode_supported)
597 return -EOPNOTSUPP;
595 priv->ctrlmode &= ~cm->mask; 598 priv->ctrlmode &= ~cm->mask;
596 priv->ctrlmode |= cm->flags; 599 priv->ctrlmode |= cm->flags;
597 } 600 }
@@ -647,6 +650,8 @@ static size_t can_get_size(const struct net_device *dev)
647 size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */ 650 size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
648 size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */ 651 size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */
649 size += sizeof(struct can_clock); /* IFLA_CAN_CLOCK */ 652 size += sizeof(struct can_clock); /* IFLA_CAN_CLOCK */
653 if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
654 size += sizeof(struct can_berr_counter);
650 if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */ 655 if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
651 size += sizeof(struct can_bittiming_const); 656 size += sizeof(struct can_bittiming_const);
652 657
@@ -657,6 +662,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
657{ 662{
658 struct can_priv *priv = netdev_priv(dev); 663 struct can_priv *priv = netdev_priv(dev);
659 struct can_ctrlmode cm = {.flags = priv->ctrlmode}; 664 struct can_ctrlmode cm = {.flags = priv->ctrlmode};
665 struct can_berr_counter bec;
660 enum can_state state = priv->state; 666 enum can_state state = priv->state;
661 667
662 if (priv->do_get_state) 668 if (priv->do_get_state)
@@ -667,6 +673,8 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
667 NLA_PUT(skb, IFLA_CAN_BITTIMING, 673 NLA_PUT(skb, IFLA_CAN_BITTIMING,
668 sizeof(priv->bittiming), &priv->bittiming); 674 sizeof(priv->bittiming), &priv->bittiming);
669 NLA_PUT(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock); 675 NLA_PUT(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock);
676 if (priv->do_get_berr_counter && !priv->do_get_berr_counter(dev, &bec))
677 NLA_PUT(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec);
670 if (priv->bittiming_const) 678 if (priv->bittiming_const)
671 NLA_PUT(skb, IFLA_CAN_BITTIMING_CONST, 679 NLA_PUT(skb, IFLA_CAN_BITTIMING_CONST,
672 sizeof(*priv->bittiming_const), priv->bittiming_const); 680 sizeof(*priv->bittiming_const), priv->bittiming_const);
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 1a72ca066a17..f8cc168ec76c 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -180,6 +180,14 @@
180#define RXBEID0_OFF 4 180#define RXBEID0_OFF 4
181#define RXBDLC_OFF 5 181#define RXBDLC_OFF 5
182#define RXBDAT_OFF 6 182#define RXBDAT_OFF 6
183#define RXFSIDH(n) ((n) * 4)
184#define RXFSIDL(n) ((n) * 4 + 1)
185#define RXFEID8(n) ((n) * 4 + 2)
186#define RXFEID0(n) ((n) * 4 + 3)
187#define RXMSIDH(n) ((n) * 4 + 0x20)
188#define RXMSIDL(n) ((n) * 4 + 0x21)
189#define RXMEID8(n) ((n) * 4 + 0x22)
190#define RXMEID0(n) ((n) * 4 + 0x23)
183 191
184#define GET_BYTE(val, byte) \ 192#define GET_BYTE(val, byte) \
185 (((val) >> ((byte) * 8)) & 0xff) 193 (((val) >> ((byte) * 8)) & 0xff)
@@ -219,7 +227,8 @@ struct mcp251x_priv {
219 struct net_device *net; 227 struct net_device *net;
220 struct spi_device *spi; 228 struct spi_device *spi;
221 229
222 struct mutex spi_lock; /* SPI buffer lock */ 230 struct mutex mcp_lock; /* SPI device lock */
231
223 u8 *spi_tx_buf; 232 u8 *spi_tx_buf;
224 u8 *spi_rx_buf; 233 u8 *spi_rx_buf;
225 dma_addr_t spi_tx_dma; 234 dma_addr_t spi_tx_dma;
@@ -227,11 +236,11 @@ struct mcp251x_priv {
227 236
228 struct sk_buff *tx_skb; 237 struct sk_buff *tx_skb;
229 int tx_len; 238 int tx_len;
239
230 struct workqueue_struct *wq; 240 struct workqueue_struct *wq;
231 struct work_struct tx_work; 241 struct work_struct tx_work;
232 struct work_struct irq_work; 242 struct work_struct restart_work;
233 struct completion awake; 243
234 int wake;
235 int force_quit; 244 int force_quit;
236 int after_suspend; 245 int after_suspend;
237#define AFTER_SUSPEND_UP 1 246#define AFTER_SUSPEND_UP 1
@@ -245,7 +254,8 @@ static void mcp251x_clean(struct net_device *net)
245{ 254{
246 struct mcp251x_priv *priv = netdev_priv(net); 255 struct mcp251x_priv *priv = netdev_priv(net);
247 256
248 net->stats.tx_errors++; 257 if (priv->tx_skb || priv->tx_len)
258 net->stats.tx_errors++;
249 if (priv->tx_skb) 259 if (priv->tx_skb)
250 dev_kfree_skb(priv->tx_skb); 260 dev_kfree_skb(priv->tx_skb);
251 if (priv->tx_len) 261 if (priv->tx_len)
@@ -300,16 +310,12 @@ static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
300 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 310 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
301 u8 val = 0; 311 u8 val = 0;
302 312
303 mutex_lock(&priv->spi_lock);
304
305 priv->spi_tx_buf[0] = INSTRUCTION_READ; 313 priv->spi_tx_buf[0] = INSTRUCTION_READ;
306 priv->spi_tx_buf[1] = reg; 314 priv->spi_tx_buf[1] = reg;
307 315
308 mcp251x_spi_trans(spi, 3); 316 mcp251x_spi_trans(spi, 3);
309 val = priv->spi_rx_buf[2]; 317 val = priv->spi_rx_buf[2];
310 318
311 mutex_unlock(&priv->spi_lock);
312
313 return val; 319 return val;
314} 320}
315 321
@@ -317,15 +323,11 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
317{ 323{
318 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 324 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
319 325
320 mutex_lock(&priv->spi_lock);
321
322 priv->spi_tx_buf[0] = INSTRUCTION_WRITE; 326 priv->spi_tx_buf[0] = INSTRUCTION_WRITE;
323 priv->spi_tx_buf[1] = reg; 327 priv->spi_tx_buf[1] = reg;
324 priv->spi_tx_buf[2] = val; 328 priv->spi_tx_buf[2] = val;
325 329
326 mcp251x_spi_trans(spi, 3); 330 mcp251x_spi_trans(spi, 3);
327
328 mutex_unlock(&priv->spi_lock);
329} 331}
330 332
331static void mcp251x_write_bits(struct spi_device *spi, u8 reg, 333static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
@@ -333,16 +335,12 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
333{ 335{
334 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 336 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
335 337
336 mutex_lock(&priv->spi_lock);
337
338 priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY; 338 priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY;
339 priv->spi_tx_buf[1] = reg; 339 priv->spi_tx_buf[1] = reg;
340 priv->spi_tx_buf[2] = mask; 340 priv->spi_tx_buf[2] = mask;
341 priv->spi_tx_buf[3] = val; 341 priv->spi_tx_buf[3] = val;
342 342
343 mcp251x_spi_trans(spi, 4); 343 mcp251x_spi_trans(spi, 4);
344
345 mutex_unlock(&priv->spi_lock);
346} 344}
347 345
348static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf, 346static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
@@ -358,10 +356,8 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
358 mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx) + i, 356 mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx) + i,
359 buf[i]); 357 buf[i]);
360 } else { 358 } else {
361 mutex_lock(&priv->spi_lock);
362 memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len); 359 memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
363 mcp251x_spi_trans(spi, TXBDAT_OFF + len); 360 mcp251x_spi_trans(spi, TXBDAT_OFF + len);
364 mutex_unlock(&priv->spi_lock);
365 } 361 }
366} 362}
367 363
@@ -408,13 +404,9 @@ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
408 for (; i < (RXBDAT_OFF + len); i++) 404 for (; i < (RXBDAT_OFF + len); i++)
409 buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i); 405 buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
410 } else { 406 } else {
411 mutex_lock(&priv->spi_lock);
412
413 priv->spi_tx_buf[RXBCTRL_OFF] = INSTRUCTION_READ_RXB(buf_idx); 407 priv->spi_tx_buf[RXBCTRL_OFF] = INSTRUCTION_READ_RXB(buf_idx);
414 mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN); 408 mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN);
415 memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN); 409 memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN);
416
417 mutex_unlock(&priv->spi_lock);
418 } 410 }
419} 411}
420 412
@@ -467,21 +459,6 @@ static void mcp251x_hw_sleep(struct spi_device *spi)
467 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP); 459 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP);
468} 460}
469 461
470static void mcp251x_hw_wakeup(struct spi_device *spi)
471{
472 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
473
474 priv->wake = 1;
475
476 /* Can only wake up by generating a wake-up interrupt. */
477 mcp251x_write_bits(spi, CANINTE, CANINTE_WAKIE, CANINTE_WAKIE);
478 mcp251x_write_bits(spi, CANINTF, CANINTF_WAKIF, CANINTF_WAKIF);
479
480 /* Wait until the device is awake */
481 if (!wait_for_completion_timeout(&priv->awake, HZ))
482 dev_err(&spi->dev, "MCP251x didn't wake-up\n");
483}
484
485static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb, 462static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
486 struct net_device *net) 463 struct net_device *net)
487{ 464{
@@ -490,16 +467,11 @@ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
490 467
491 if (priv->tx_skb || priv->tx_len) { 468 if (priv->tx_skb || priv->tx_len) {
492 dev_warn(&spi->dev, "hard_xmit called while tx busy\n"); 469 dev_warn(&spi->dev, "hard_xmit called while tx busy\n");
493 netif_stop_queue(net);
494 return NETDEV_TX_BUSY; 470 return NETDEV_TX_BUSY;
495 } 471 }
496 472
497 if (skb->len != sizeof(struct can_frame)) { 473 if (can_dropped_invalid_skb(net, skb))
498 dev_err(&spi->dev, "dropping packet - bad length\n");
499 dev_kfree_skb(skb);
500 net->stats.tx_dropped++;
501 return NETDEV_TX_OK; 474 return NETDEV_TX_OK;
502 }
503 475
504 netif_stop_queue(net); 476 netif_stop_queue(net);
505 priv->tx_skb = skb; 477 priv->tx_skb = skb;
@@ -515,12 +487,13 @@ static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
515 487
516 switch (mode) { 488 switch (mode) {
517 case CAN_MODE_START: 489 case CAN_MODE_START:
490 mcp251x_clean(net);
518 /* We have to delay work since SPI I/O may sleep */ 491 /* We have to delay work since SPI I/O may sleep */
519 priv->can.state = CAN_STATE_ERROR_ACTIVE; 492 priv->can.state = CAN_STATE_ERROR_ACTIVE;
520 priv->restart_tx = 1; 493 priv->restart_tx = 1;
521 if (priv->can.restart_ms == 0) 494 if (priv->can.restart_ms == 0)
522 priv->after_suspend = AFTER_SUSPEND_RESTART; 495 priv->after_suspend = AFTER_SUSPEND_RESTART;
523 queue_work(priv->wq, &priv->irq_work); 496 queue_work(priv->wq, &priv->restart_work);
524 break; 497 break;
525 default: 498 default:
526 return -EOPNOTSUPP; 499 return -EOPNOTSUPP;
@@ -529,7 +502,7 @@ static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
529 return 0; 502 return 0;
530} 503}
531 504
532static void mcp251x_set_normal_mode(struct spi_device *spi) 505static int mcp251x_set_normal_mode(struct spi_device *spi)
533{ 506{
534 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 507 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
535 unsigned long timeout; 508 unsigned long timeout;
@@ -537,12 +510,14 @@ static void mcp251x_set_normal_mode(struct spi_device *spi)
537 /* Enable interrupts */ 510 /* Enable interrupts */
538 mcp251x_write_reg(spi, CANINTE, 511 mcp251x_write_reg(spi, CANINTE,
539 CANINTE_ERRIE | CANINTE_TX2IE | CANINTE_TX1IE | 512 CANINTE_ERRIE | CANINTE_TX2IE | CANINTE_TX1IE |
540 CANINTE_TX0IE | CANINTE_RX1IE | CANINTE_RX0IE | 513 CANINTE_TX0IE | CANINTE_RX1IE | CANINTE_RX0IE);
541 CANINTF_MERRF);
542 514
543 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { 515 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
544 /* Put device into loopback mode */ 516 /* Put device into loopback mode */
545 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK); 517 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK);
518 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
519 /* Put device into listen-only mode */
520 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LISTEN_ONLY);
546 } else { 521 } else {
547 /* Put device into normal mode */ 522 /* Put device into normal mode */
548 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL); 523 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL);
@@ -554,11 +529,12 @@ static void mcp251x_set_normal_mode(struct spi_device *spi)
554 if (time_after(jiffies, timeout)) { 529 if (time_after(jiffies, timeout)) {
555 dev_err(&spi->dev, "MCP251x didn't" 530 dev_err(&spi->dev, "MCP251x didn't"
556 " enter in normal mode\n"); 531 " enter in normal mode\n");
557 return; 532 return -EBUSY;
558 } 533 }
559 } 534 }
560 } 535 }
561 priv->can.state = CAN_STATE_ERROR_ACTIVE; 536 priv->can.state = CAN_STATE_ERROR_ACTIVE;
537 return 0;
562} 538}
563 539
564static int mcp251x_do_set_bittiming(struct net_device *net) 540static int mcp251x_do_set_bittiming(struct net_device *net)
@@ -589,33 +565,39 @@ static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
589{ 565{
590 mcp251x_do_set_bittiming(net); 566 mcp251x_do_set_bittiming(net);
591 567
592 /* Enable RX0->RX1 buffer roll over and disable filters */ 568 mcp251x_write_reg(spi, RXBCTRL(0),
593 mcp251x_write_bits(spi, RXBCTRL(0), 569 RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1);
594 RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1, 570 mcp251x_write_reg(spi, RXBCTRL(1),
595 RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1); 571 RXBCTRL_RXM0 | RXBCTRL_RXM1);
596 mcp251x_write_bits(spi, RXBCTRL(1),
597 RXBCTRL_RXM0 | RXBCTRL_RXM1,
598 RXBCTRL_RXM0 | RXBCTRL_RXM1);
599 return 0; 572 return 0;
600} 573}
601 574
602static void mcp251x_hw_reset(struct spi_device *spi) 575static int mcp251x_hw_reset(struct spi_device *spi)
603{ 576{
604 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 577 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
605 int ret; 578 int ret;
606 579 unsigned long timeout;
607 mutex_lock(&priv->spi_lock);
608 580
609 priv->spi_tx_buf[0] = INSTRUCTION_RESET; 581 priv->spi_tx_buf[0] = INSTRUCTION_RESET;
610
611 ret = spi_write(spi, priv->spi_tx_buf, 1); 582 ret = spi_write(spi, priv->spi_tx_buf, 1);
612 583 if (ret) {
613 mutex_unlock(&priv->spi_lock);
614
615 if (ret)
616 dev_err(&spi->dev, "reset failed: ret = %d\n", ret); 584 dev_err(&spi->dev, "reset failed: ret = %d\n", ret);
585 return -EIO;
586 }
587
617 /* Wait for reset to finish */ 588 /* Wait for reset to finish */
589 timeout = jiffies + HZ;
618 mdelay(10); 590 mdelay(10);
591 while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK)
592 != CANCTRL_REQOP_CONF) {
593 schedule();
594 if (time_after(jiffies, timeout)) {
595 dev_err(&spi->dev, "MCP251x didn't"
596 " enter in conf mode after reset\n");
597 return -EBUSY;
598 }
599 }
600 return 0;
619} 601}
620 602
621static int mcp251x_hw_probe(struct spi_device *spi) 603static int mcp251x_hw_probe(struct spi_device *spi)
@@ -639,63 +621,17 @@ static int mcp251x_hw_probe(struct spi_device *spi)
639 return (st1 == 0x80 && st2 == 0x07) ? 1 : 0; 621 return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
640} 622}
641 623
642static irqreturn_t mcp251x_can_isr(int irq, void *dev_id) 624static void mcp251x_open_clean(struct net_device *net)
643{
644 struct net_device *net = (struct net_device *)dev_id;
645 struct mcp251x_priv *priv = netdev_priv(net);
646
647 /* Schedule bottom half */
648 if (!work_pending(&priv->irq_work))
649 queue_work(priv->wq, &priv->irq_work);
650
651 return IRQ_HANDLED;
652}
653
654static int mcp251x_open(struct net_device *net)
655{ 625{
656 struct mcp251x_priv *priv = netdev_priv(net); 626 struct mcp251x_priv *priv = netdev_priv(net);
657 struct spi_device *spi = priv->spi; 627 struct spi_device *spi = priv->spi;
658 struct mcp251x_platform_data *pdata = spi->dev.platform_data; 628 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
659 int ret;
660
661 ret = open_candev(net);
662 if (ret) {
663 dev_err(&spi->dev, "unable to set initial baudrate!\n");
664 return ret;
665 }
666 629
630 free_irq(spi->irq, priv);
631 mcp251x_hw_sleep(spi);
667 if (pdata->transceiver_enable) 632 if (pdata->transceiver_enable)
668 pdata->transceiver_enable(1); 633 pdata->transceiver_enable(0);
669 634 close_candev(net);
670 priv->force_quit = 0;
671 priv->tx_skb = NULL;
672 priv->tx_len = 0;
673
674 ret = request_irq(spi->irq, mcp251x_can_isr,
675 IRQF_TRIGGER_FALLING, DEVICE_NAME, net);
676 if (ret) {
677 dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
678 if (pdata->transceiver_enable)
679 pdata->transceiver_enable(0);
680 close_candev(net);
681 return ret;
682 }
683
684 mcp251x_hw_wakeup(spi);
685 mcp251x_hw_reset(spi);
686 ret = mcp251x_setup(net, priv, spi);
687 if (ret) {
688 free_irq(spi->irq, net);
689 mcp251x_hw_sleep(spi);
690 if (pdata->transceiver_enable)
691 pdata->transceiver_enable(0);
692 close_candev(net);
693 return ret;
694 }
695 mcp251x_set_normal_mode(spi);
696 netif_wake_queue(net);
697
698 return 0;
699} 635}
700 636
701static int mcp251x_stop(struct net_device *net) 637static int mcp251x_stop(struct net_device *net)
@@ -706,17 +642,19 @@ static int mcp251x_stop(struct net_device *net)
706 642
707 close_candev(net); 643 close_candev(net);
708 644
645 priv->force_quit = 1;
646 free_irq(spi->irq, priv);
647 destroy_workqueue(priv->wq);
648 priv->wq = NULL;
649
650 mutex_lock(&priv->mcp_lock);
651
709 /* Disable and clear pending interrupts */ 652 /* Disable and clear pending interrupts */
710 mcp251x_write_reg(spi, CANINTE, 0x00); 653 mcp251x_write_reg(spi, CANINTE, 0x00);
711 mcp251x_write_reg(spi, CANINTF, 0x00); 654 mcp251x_write_reg(spi, CANINTF, 0x00);
712 655
713 priv->force_quit = 1;
714 free_irq(spi->irq, net);
715 flush_workqueue(priv->wq);
716
717 mcp251x_write_reg(spi, TXBCTRL(0), 0); 656 mcp251x_write_reg(spi, TXBCTRL(0), 0);
718 if (priv->tx_skb || priv->tx_len) 657 mcp251x_clean(net);
719 mcp251x_clean(net);
720 658
721 mcp251x_hw_sleep(spi); 659 mcp251x_hw_sleep(spi);
722 660
@@ -725,9 +663,27 @@ static int mcp251x_stop(struct net_device *net)
725 663
726 priv->can.state = CAN_STATE_STOPPED; 664 priv->can.state = CAN_STATE_STOPPED;
727 665
666 mutex_unlock(&priv->mcp_lock);
667
728 return 0; 668 return 0;
729} 669}
730 670
671static void mcp251x_error_skb(struct net_device *net, int can_id, int data1)
672{
673 struct sk_buff *skb;
674 struct can_frame *frame;
675
676 skb = alloc_can_err_skb(net, &frame);
677 if (skb) {
678 frame->can_id = can_id;
679 frame->data[1] = data1;
680 netif_rx(skb);
681 } else {
682 dev_err(&net->dev,
683 "cannot allocate error skb\n");
684 }
685}
686
731static void mcp251x_tx_work_handler(struct work_struct *ws) 687static void mcp251x_tx_work_handler(struct work_struct *ws)
732{ 688{
733 struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv, 689 struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
@@ -736,33 +692,32 @@ static void mcp251x_tx_work_handler(struct work_struct *ws)
736 struct net_device *net = priv->net; 692 struct net_device *net = priv->net;
737 struct can_frame *frame; 693 struct can_frame *frame;
738 694
695 mutex_lock(&priv->mcp_lock);
739 if (priv->tx_skb) { 696 if (priv->tx_skb) {
740 frame = (struct can_frame *)priv->tx_skb->data;
741
742 if (priv->can.state == CAN_STATE_BUS_OFF) { 697 if (priv->can.state == CAN_STATE_BUS_OFF) {
743 mcp251x_clean(net); 698 mcp251x_clean(net);
744 netif_wake_queue(net); 699 } else {
745 return; 700 frame = (struct can_frame *)priv->tx_skb->data;
701
702 if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
703 frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
704 mcp251x_hw_tx(spi, frame, 0);
705 priv->tx_len = 1 + frame->can_dlc;
706 can_put_echo_skb(priv->tx_skb, net, 0);
707 priv->tx_skb = NULL;
746 } 708 }
747 if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
748 frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
749 mcp251x_hw_tx(spi, frame, 0);
750 priv->tx_len = 1 + frame->can_dlc;
751 can_put_echo_skb(priv->tx_skb, net, 0);
752 priv->tx_skb = NULL;
753 } 709 }
710 mutex_unlock(&priv->mcp_lock);
754} 711}
755 712
756static void mcp251x_irq_work_handler(struct work_struct *ws) 713static void mcp251x_restart_work_handler(struct work_struct *ws)
757{ 714{
758 struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv, 715 struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
759 irq_work); 716 restart_work);
760 struct spi_device *spi = priv->spi; 717 struct spi_device *spi = priv->spi;
761 struct net_device *net = priv->net; 718 struct net_device *net = priv->net;
762 u8 txbnctrl;
763 u8 intf;
764 enum can_state new_state;
765 719
720 mutex_lock(&priv->mcp_lock);
766 if (priv->after_suspend) { 721 if (priv->after_suspend) {
767 mdelay(10); 722 mdelay(10);
768 mcp251x_hw_reset(spi); 723 mcp251x_hw_reset(spi);
@@ -771,45 +726,54 @@ static void mcp251x_irq_work_handler(struct work_struct *ws)
771 mcp251x_set_normal_mode(spi); 726 mcp251x_set_normal_mode(spi);
772 } else if (priv->after_suspend & AFTER_SUSPEND_UP) { 727 } else if (priv->after_suspend & AFTER_SUSPEND_UP) {
773 netif_device_attach(net); 728 netif_device_attach(net);
774 /* Clean since we lost tx buffer */ 729 mcp251x_clean(net);
775 if (priv->tx_skb || priv->tx_len) {
776 mcp251x_clean(net);
777 netif_wake_queue(net);
778 }
779 mcp251x_set_normal_mode(spi); 730 mcp251x_set_normal_mode(spi);
731 netif_wake_queue(net);
780 } else { 732 } else {
781 mcp251x_hw_sleep(spi); 733 mcp251x_hw_sleep(spi);
782 } 734 }
783 priv->after_suspend = 0; 735 priv->after_suspend = 0;
736 priv->force_quit = 0;
784 } 737 }
785 738
786 if (priv->can.restart_ms == 0 && priv->can.state == CAN_STATE_BUS_OFF) 739 if (priv->restart_tx) {
787 return; 740 priv->restart_tx = 0;
741 mcp251x_write_reg(spi, TXBCTRL(0), 0);
742 mcp251x_clean(net);
743 netif_wake_queue(net);
744 mcp251x_error_skb(net, CAN_ERR_RESTARTED, 0);
745 }
746 mutex_unlock(&priv->mcp_lock);
747}
788 748
789 while (!priv->force_quit && !freezing(current)) { 749static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
790 u8 eflag = mcp251x_read_reg(spi, EFLG); 750{
791 int can_id = 0, data1 = 0; 751 struct mcp251x_priv *priv = dev_id;
752 struct spi_device *spi = priv->spi;
753 struct net_device *net = priv->net;
792 754
793 mcp251x_write_reg(spi, EFLG, 0x00); 755 mutex_lock(&priv->mcp_lock);
756 while (!priv->force_quit) {
757 enum can_state new_state;
758 u8 intf = mcp251x_read_reg(spi, CANINTF);
759 u8 eflag;
760 int can_id = 0, data1 = 0;
794 761
795 if (priv->restart_tx) { 762 if (intf & CANINTF_RX0IF) {
796 priv->restart_tx = 0; 763 mcp251x_hw_rx(spi, 0);
797 mcp251x_write_reg(spi, TXBCTRL(0), 0); 764 /* Free one buffer ASAP */
798 if (priv->tx_skb || priv->tx_len) 765 mcp251x_write_bits(spi, CANINTF, intf & CANINTF_RX0IF,
799 mcp251x_clean(net); 766 0x00);
800 netif_wake_queue(net);
801 can_id |= CAN_ERR_RESTARTED;
802 } 767 }
803 768
804 if (priv->wake) { 769 if (intf & CANINTF_RX1IF)
805 /* Wait whilst the device wakes up */ 770 mcp251x_hw_rx(spi, 1);
806 mdelay(10);
807 priv->wake = 0;
808 }
809 771
810 intf = mcp251x_read_reg(spi, CANINTF);
811 mcp251x_write_bits(spi, CANINTF, intf, 0x00); 772 mcp251x_write_bits(spi, CANINTF, intf, 0x00);
812 773
774 eflag = mcp251x_read_reg(spi, EFLG);
775 mcp251x_write_reg(spi, EFLG, 0x00);
776
813 /* Update can state */ 777 /* Update can state */
814 if (eflag & EFLG_TXBO) { 778 if (eflag & EFLG_TXBO) {
815 new_state = CAN_STATE_BUS_OFF; 779 new_state = CAN_STATE_BUS_OFF;
@@ -850,59 +814,31 @@ static void mcp251x_irq_work_handler(struct work_struct *ws)
850 } 814 }
851 priv->can.state = new_state; 815 priv->can.state = new_state;
852 816
853 if ((intf & CANINTF_ERRIF) || (can_id & CAN_ERR_RESTARTED)) { 817 if (intf & CANINTF_ERRIF) {
854 struct sk_buff *skb; 818 /* Handle overflow counters */
855 struct can_frame *frame; 819 if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) {
856 820 if (eflag & EFLG_RX0OVR)
857 /* Create error frame */ 821 net->stats.rx_over_errors++;
858 skb = alloc_can_err_skb(net, &frame); 822 if (eflag & EFLG_RX1OVR)
859 if (skb) { 823 net->stats.rx_over_errors++;
860 /* Set error frame flags based on bus state */ 824 can_id |= CAN_ERR_CRTL;
861 frame->can_id = can_id; 825 data1 |= CAN_ERR_CRTL_RX_OVERFLOW;
862 frame->data[1] = data1;
863
864 /* Update net stats for overflows */
865 if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) {
866 if (eflag & EFLG_RX0OVR)
867 net->stats.rx_over_errors++;
868 if (eflag & EFLG_RX1OVR)
869 net->stats.rx_over_errors++;
870 frame->can_id |= CAN_ERR_CRTL;
871 frame->data[1] |=
872 CAN_ERR_CRTL_RX_OVERFLOW;
873 }
874
875 netif_rx(skb);
876 } else {
877 dev_info(&spi->dev,
878 "cannot allocate error skb\n");
879 } 826 }
827 mcp251x_error_skb(net, can_id, data1);
880 } 828 }
881 829
882 if (priv->can.state == CAN_STATE_BUS_OFF) { 830 if (priv->can.state == CAN_STATE_BUS_OFF) {
883 if (priv->can.restart_ms == 0) { 831 if (priv->can.restart_ms == 0) {
832 priv->force_quit = 1;
884 can_bus_off(net); 833 can_bus_off(net);
885 mcp251x_hw_sleep(spi); 834 mcp251x_hw_sleep(spi);
886 return; 835 break;
887 } 836 }
888 } 837 }
889 838
890 if (intf == 0) 839 if (intf == 0)
891 break; 840 break;
892 841
893 if (intf & CANINTF_WAKIF)
894 complete(&priv->awake);
895
896 if (intf & CANINTF_MERRF) {
897 /* If there are pending Tx buffers, restart queue */
898 txbnctrl = mcp251x_read_reg(spi, TXBCTRL(0));
899 if (!(txbnctrl & TXBCTRL_TXREQ)) {
900 if (priv->tx_skb || priv->tx_len)
901 mcp251x_clean(net);
902 netif_wake_queue(net);
903 }
904 }
905
906 if (intf & (CANINTF_TX2IF | CANINTF_TX1IF | CANINTF_TX0IF)) { 842 if (intf & (CANINTF_TX2IF | CANINTF_TX1IF | CANINTF_TX0IF)) {
907 net->stats.tx_packets++; 843 net->stats.tx_packets++;
908 net->stats.tx_bytes += priv->tx_len - 1; 844 net->stats.tx_bytes += priv->tx_len - 1;
@@ -913,12 +849,66 @@ static void mcp251x_irq_work_handler(struct work_struct *ws)
913 netif_wake_queue(net); 849 netif_wake_queue(net);
914 } 850 }
915 851
916 if (intf & CANINTF_RX0IF) 852 }
917 mcp251x_hw_rx(spi, 0); 853 mutex_unlock(&priv->mcp_lock);
854 return IRQ_HANDLED;
855}
918 856
919 if (intf & CANINTF_RX1IF) 857static int mcp251x_open(struct net_device *net)
920 mcp251x_hw_rx(spi, 1); 858{
859 struct mcp251x_priv *priv = netdev_priv(net);
860 struct spi_device *spi = priv->spi;
861 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
862 int ret;
863
864 ret = open_candev(net);
865 if (ret) {
866 dev_err(&spi->dev, "unable to set initial baudrate!\n");
867 return ret;
868 }
869
870 mutex_lock(&priv->mcp_lock);
871 if (pdata->transceiver_enable)
872 pdata->transceiver_enable(1);
873
874 priv->force_quit = 0;
875 priv->tx_skb = NULL;
876 priv->tx_len = 0;
877
878 ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
879 IRQF_TRIGGER_FALLING, DEVICE_NAME, priv);
880 if (ret) {
881 dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
882 if (pdata->transceiver_enable)
883 pdata->transceiver_enable(0);
884 close_candev(net);
885 goto open_unlock;
886 }
887
888 priv->wq = create_freezeable_workqueue("mcp251x_wq");
889 INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
890 INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
891
892 ret = mcp251x_hw_reset(spi);
893 if (ret) {
894 mcp251x_open_clean(net);
895 goto open_unlock;
896 }
897 ret = mcp251x_setup(net, priv, spi);
898 if (ret) {
899 mcp251x_open_clean(net);
900 goto open_unlock;
921 } 901 }
902 ret = mcp251x_set_normal_mode(spi);
903 if (ret) {
904 mcp251x_open_clean(net);
905 goto open_unlock;
906 }
907 netif_wake_queue(net);
908
909open_unlock:
910 mutex_unlock(&priv->mcp_lock);
911 return ret;
922} 912}
923 913
924static const struct net_device_ops mcp251x_netdev_ops = { 914static const struct net_device_ops mcp251x_netdev_ops = {
@@ -952,11 +942,13 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
952 priv->can.bittiming_const = &mcp251x_bittiming_const; 942 priv->can.bittiming_const = &mcp251x_bittiming_const;
953 priv->can.do_set_mode = mcp251x_do_set_mode; 943 priv->can.do_set_mode = mcp251x_do_set_mode;
954 priv->can.clock.freq = pdata->oscillator_frequency / 2; 944 priv->can.clock.freq = pdata->oscillator_frequency / 2;
945 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
946 CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
955 priv->net = net; 947 priv->net = net;
956 dev_set_drvdata(&spi->dev, priv); 948 dev_set_drvdata(&spi->dev, priv);
957 949
958 priv->spi = spi; 950 priv->spi = spi;
959 mutex_init(&priv->spi_lock); 951 mutex_init(&priv->mcp_lock);
960 952
961 /* If requested, allocate DMA buffers */ 953 /* If requested, allocate DMA buffers */
962 if (mcp251x_enable_dma) { 954 if (mcp251x_enable_dma) {
@@ -1005,18 +997,12 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
1005 997
1006 SET_NETDEV_DEV(net, &spi->dev); 998 SET_NETDEV_DEV(net, &spi->dev);
1007 999
1008 priv->wq = create_freezeable_workqueue("mcp251x_wq");
1009
1010 INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
1011 INIT_WORK(&priv->irq_work, mcp251x_irq_work_handler);
1012
1013 init_completion(&priv->awake);
1014
1015 /* Configure the SPI bus */ 1000 /* Configure the SPI bus */
1016 spi->mode = SPI_MODE_0; 1001 spi->mode = SPI_MODE_0;
1017 spi->bits_per_word = 8; 1002 spi->bits_per_word = 8;
1018 spi_setup(spi); 1003 spi_setup(spi);
1019 1004
1005 /* Here is OK to not lock the MCP, no one knows about it yet */
1020 if (!mcp251x_hw_probe(spi)) { 1006 if (!mcp251x_hw_probe(spi)) {
1021 dev_info(&spi->dev, "Probe failed\n"); 1007 dev_info(&spi->dev, "Probe failed\n");
1022 goto error_probe; 1008 goto error_probe;
@@ -1059,10 +1045,6 @@ static int __devexit mcp251x_can_remove(struct spi_device *spi)
1059 unregister_candev(net); 1045 unregister_candev(net);
1060 free_candev(net); 1046 free_candev(net);
1061 1047
1062 priv->force_quit = 1;
1063 flush_workqueue(priv->wq);
1064 destroy_workqueue(priv->wq);
1065
1066 if (mcp251x_enable_dma) { 1048 if (mcp251x_enable_dma) {
1067 dma_free_coherent(&spi->dev, PAGE_SIZE, 1049 dma_free_coherent(&spi->dev, PAGE_SIZE,
1068 priv->spi_tx_buf, priv->spi_tx_dma); 1050 priv->spi_tx_buf, priv->spi_tx_dma);
@@ -1084,6 +1066,12 @@ static int mcp251x_can_suspend(struct spi_device *spi, pm_message_t state)
1084 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 1066 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
1085 struct net_device *net = priv->net; 1067 struct net_device *net = priv->net;
1086 1068
1069 priv->force_quit = 1;
1070 disable_irq(spi->irq);
1071 /*
1072 * Note: at this point neither IST nor workqueues are running.
1073 * open/stop cannot be called anyway so locking is not needed
1074 */
1087 if (netif_running(net)) { 1075 if (netif_running(net)) {
1088 netif_device_detach(net); 1076 netif_device_detach(net);
1089 1077
@@ -1110,16 +1098,18 @@ static int mcp251x_can_resume(struct spi_device *spi)
1110 1098
1111 if (priv->after_suspend & AFTER_SUSPEND_POWER) { 1099 if (priv->after_suspend & AFTER_SUSPEND_POWER) {
1112 pdata->power_enable(1); 1100 pdata->power_enable(1);
1113 queue_work(priv->wq, &priv->irq_work); 1101 queue_work(priv->wq, &priv->restart_work);
1114 } else { 1102 } else {
1115 if (priv->after_suspend & AFTER_SUSPEND_UP) { 1103 if (priv->after_suspend & AFTER_SUSPEND_UP) {
1116 if (pdata->transceiver_enable) 1104 if (pdata->transceiver_enable)
1117 pdata->transceiver_enable(1); 1105 pdata->transceiver_enable(1);
1118 queue_work(priv->wq, &priv->irq_work); 1106 queue_work(priv->wq, &priv->restart_work);
1119 } else { 1107 } else {
1120 priv->after_suspend = 0; 1108 priv->after_suspend = 0;
1121 } 1109 }
1122 } 1110 }
1111 priv->force_quit = 0;
1112 enable_irq(spi->irq);
1123 return 0; 1113 return 0;
1124} 1114}
1125#else 1115#else
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index cd0f2d6f375d..27d1d398e25e 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -11,12 +11,13 @@ if CAN_MSCAN
11 11
12config CAN_MPC5XXX 12config CAN_MPC5XXX
13 tristate "Freescale MPC5xxx onboard CAN controller" 13 tristate "Freescale MPC5xxx onboard CAN controller"
14 depends on PPC_MPC52xx 14 depends on (PPC_MPC52xx || PPC_MPC512x)
15 ---help--- 15 ---help---
16 If you say yes here you get support for Freescale's MPC5xxx 16 If you say yes here you get support for Freescale's MPC5xxx
17 onboard CAN controller. 17 onboard CAN controller. Currently, the MPC5200, MPC5200B and
18 MPC5121 (Rev. 2 and later) are supported.
18 19
19 This driver can also be built as a module. If so, the module 20 This driver can also be built as a module. If so, the module
20 will be called mscan-mpc5xxx.ko. 21 will be called mscan-mpc5xxx.ko.
21 22
22endif 23endif
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 1de6f6349b16..03e7c48465a2 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -29,6 +29,7 @@
29#include <linux/can/dev.h> 29#include <linux/can/dev.h>
30#include <linux/of_platform.h> 30#include <linux/of_platform.h>
31#include <sysdev/fsl_soc.h> 31#include <sysdev/fsl_soc.h>
32#include <linux/clk.h>
32#include <linux/io.h> 33#include <linux/io.h>
33#include <asm/mpc52xx.h> 34#include <asm/mpc52xx.h>
34 35
@@ -36,22 +37,21 @@
36 37
37#define DRV_NAME "mpc5xxx_can" 38#define DRV_NAME "mpc5xxx_can"
38 39
39static struct of_device_id mpc52xx_cdm_ids[] __devinitdata = { 40struct mpc5xxx_can_data {
41 unsigned int type;
42 u32 (*get_clock)(struct of_device *ofdev, const char *clock_name,
43 int *mscan_clksrc);
44};
45
46#ifdef CONFIG_PPC_MPC52xx
47static struct of_device_id __devinitdata mpc52xx_cdm_ids[] = {
40 { .compatible = "fsl,mpc5200-cdm", }, 48 { .compatible = "fsl,mpc5200-cdm", },
41 {} 49 {}
42}; 50};
43 51
44/* 52static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
45 * Get frequency of the MSCAN clock source 53 const char *clock_name,
46 * 54 int *mscan_clksrc)
47 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock (IP_CLK)
48 * can be selected. According to the MPC5200 user's manual, the oscillator
49 * clock is the better choice as it has less jitter but due to a hardware
50 * bug, it can not be selected for the old MPC5200 Rev. A chips.
51 */
52
53static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
54 int clock_src)
55{ 55{
56 unsigned int pvr; 56 unsigned int pvr;
57 struct mpc52xx_cdm __iomem *cdm; 57 struct mpc52xx_cdm __iomem *cdm;
@@ -61,21 +61,33 @@ static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
61 61
62 pvr = mfspr(SPRN_PVR); 62 pvr = mfspr(SPRN_PVR);
63 63
64 freq = mpc5xxx_get_bus_frequency(of->node); 64 /*
65 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
66 * (IP_CLK) can be selected as MSCAN clock source. According to
67 * the MPC5200 user's manual, the oscillator clock is the better
68 * choice as it has less jitter. For this reason, it is selected
69 * by default. Unfortunately, it can not be selected for the old
70 * MPC5200 Rev. A chips due to a hardware bug (check errata).
71 */
72 if (clock_name && strcmp(clock_name, "ip") == 0)
73 *mscan_clksrc = MSCAN_CLKSRC_BUS;
74 else
75 *mscan_clksrc = MSCAN_CLKSRC_XTAL;
76
77 freq = mpc5xxx_get_bus_frequency(ofdev->node);
65 if (!freq) 78 if (!freq)
66 return 0; 79 return 0;
67 80
68 if (clock_src == MSCAN_CLKSRC_BUS || pvr == 0x80822011) 81 if (*mscan_clksrc == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
69 return freq; 82 return freq;
70 83
71 /* Determine SYS_XTAL_IN frequency from the clock domain settings */ 84 /* Determine SYS_XTAL_IN frequency from the clock domain settings */
72 np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids); 85 np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids);
73 if (!np_cdm) { 86 if (!np_cdm) {
74 dev_err(&of->dev, "can't get clock node!\n"); 87 dev_err(&ofdev->dev, "can't get clock node!\n");
75 return 0; 88 return 0;
76 } 89 }
77 cdm = of_iomap(np_cdm, 0); 90 cdm = of_iomap(np_cdm, 0);
78 of_node_put(np_cdm);
79 91
80 if (in_8(&cdm->ipb_clk_sel) & 0x1) 92 if (in_8(&cdm->ipb_clk_sel) & 0x1)
81 freq *= 2; 93 freq *= 2;
@@ -84,26 +96,174 @@ static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
84 freq *= (val & (1 << 5)) ? 8 : 4; 96 freq *= (val & (1 << 5)) ? 8 : 4;
85 freq /= (val & (1 << 6)) ? 12 : 16; 97 freq /= (val & (1 << 6)) ? 12 : 16;
86 98
99 of_node_put(np_cdm);
87 iounmap(cdm); 100 iounmap(cdm);
88 101
89 return freq; 102 return freq;
90} 103}
104#else /* !CONFIG_PPC_MPC52xx */
105static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
106 const char *clock_name,
107 int *mscan_clksrc)
108{
109 return 0;
110}
111#endif /* CONFIG_PPC_MPC52xx */
112
113#ifdef CONFIG_PPC_MPC512x
114struct mpc512x_clockctl {
115 u32 spmr; /* System PLL Mode Reg */
116 u32 sccr[2]; /* System Clk Ctrl Reg 1 & 2 */
117 u32 scfr1; /* System Clk Freq Reg 1 */
118 u32 scfr2; /* System Clk Freq Reg 2 */
119 u32 reserved;
120 u32 bcr; /* Bread Crumb Reg */
121 u32 pccr[12]; /* PSC Clk Ctrl Reg 0-11 */
122 u32 spccr; /* SPDIF Clk Ctrl Reg */
123 u32 cccr; /* CFM Clk Ctrl Reg */
124 u32 dccr; /* DIU Clk Cnfg Reg */
125 u32 mccr[4]; /* MSCAN Clk Ctrl Reg 1-3 */
126};
127
128static struct of_device_id __devinitdata mpc512x_clock_ids[] = {
129 { .compatible = "fsl,mpc5121-clock", },
130 {}
131};
132
133static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
134 const char *clock_name,
135 int *mscan_clksrc)
136{
137 struct mpc512x_clockctl __iomem *clockctl;
138 struct device_node *np_clock;
139 struct clk *sys_clk, *ref_clk;
140 int plen, clockidx, clocksrc = -1;
141 u32 sys_freq, val, clockdiv = 1, freq = 0;
142 const u32 *pval;
143
144 np_clock = of_find_matching_node(NULL, mpc512x_clock_ids);
145 if (!np_clock) {
146 dev_err(&ofdev->dev, "couldn't find clock node\n");
147 return -ENODEV;
148 }
149 clockctl = of_iomap(np_clock, 0);
150 if (!clockctl) {
151 dev_err(&ofdev->dev, "couldn't map clock registers\n");
152 return 0;
153 }
154
155 /* Determine the MSCAN device index from the physical address */
156 pval = of_get_property(ofdev->node, "reg", &plen);
157 BUG_ON(!pval || plen < sizeof(*pval));
158 clockidx = (*pval & 0x80) ? 1 : 0;
159 if (*pval & 0x2000)
160 clockidx += 2;
161
162 /*
163 * Clock source and divider selection: 3 different clock sources
164 * can be selected: "ip", "ref" or "sys". For the latter two, a
165 * clock divider can be defined as well. If the clock source is
166 * not specified by the device tree, we first try to find an
167 * optimal CAN source clock based on the system clock. If that
168 * is not posslible, the reference clock will be used.
169 */
170 if (clock_name && !strcmp(clock_name, "ip")) {
171 *mscan_clksrc = MSCAN_CLKSRC_IPS;
172 freq = mpc5xxx_get_bus_frequency(ofdev->node);
173 } else {
174 *mscan_clksrc = MSCAN_CLKSRC_BUS;
175
176 pval = of_get_property(ofdev->node,
177 "fsl,mscan-clock-divider", &plen);
178 if (pval && plen == sizeof(*pval))
179 clockdiv = *pval;
180 if (!clockdiv)
181 clockdiv = 1;
182
183 if (!clock_name || !strcmp(clock_name, "sys")) {
184 sys_clk = clk_get(&ofdev->dev, "sys_clk");
185 if (!sys_clk) {
186 dev_err(&ofdev->dev, "couldn't get sys_clk\n");
187 goto exit_unmap;
188 }
189 /* Get and round up/down sys clock rate */
190 sys_freq = 1000000 *
191 ((clk_get_rate(sys_clk) + 499999) / 1000000);
192
193 if (!clock_name) {
194 /* A multiple of 16 MHz would be optimal */
195 if ((sys_freq % 16000000) == 0) {
196 clocksrc = 0;
197 clockdiv = sys_freq / 16000000;
198 freq = sys_freq / clockdiv;
199 }
200 } else {
201 clocksrc = 0;
202 freq = sys_freq / clockdiv;
203 }
204 }
205
206 if (clocksrc < 0) {
207 ref_clk = clk_get(&ofdev->dev, "ref_clk");
208 if (!ref_clk) {
209 dev_err(&ofdev->dev, "couldn't get ref_clk\n");
210 goto exit_unmap;
211 }
212 clocksrc = 1;
213 freq = clk_get_rate(ref_clk) / clockdiv;
214 }
215 }
216
217 /* Disable clock */
218 out_be32(&clockctl->mccr[clockidx], 0x0);
219 if (clocksrc >= 0) {
220 /* Set source and divider */
221 val = (clocksrc << 14) | ((clockdiv - 1) << 17);
222 out_be32(&clockctl->mccr[clockidx], val);
223 /* Enable clock */
224 out_be32(&clockctl->mccr[clockidx], val | 0x10000);
225 }
226
227 /* Enable MSCAN clock domain */
228 val = in_be32(&clockctl->sccr[1]);
229 if (!(val & (1 << 25)))
230 out_be32(&clockctl->sccr[1], val | (1 << 25));
231
232 dev_dbg(&ofdev->dev, "using '%s' with frequency divider %d\n",
233 *mscan_clksrc == MSCAN_CLKSRC_IPS ? "ips_clk" :
234 clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv);
235
236exit_unmap:
237 of_node_put(np_clock);
238 iounmap(clockctl);
239
240 return freq;
241}
242#else /* !CONFIG_PPC_MPC512x */
243static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
244 const char *clock_name,
245 int *mscan_clksrc)
246{
247 return 0;
248}
249#endif /* CONFIG_PPC_MPC512x */
91 250
92static int __devinit mpc5xxx_can_probe(struct of_device *ofdev, 251static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
93 const struct of_device_id *id) 252 const struct of_device_id *id)
94{ 253{
254 struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data;
95 struct device_node *np = ofdev->node; 255 struct device_node *np = ofdev->node;
96 struct net_device *dev; 256 struct net_device *dev;
97 struct mscan_priv *priv; 257 struct mscan_priv *priv;
98 void __iomem *base; 258 void __iomem *base;
99 const char *clk_src; 259 const char *clock_name = NULL;
100 int err, irq, clock_src; 260 int irq, mscan_clksrc = 0;
261 int err = -ENOMEM;
101 262
102 base = of_iomap(ofdev->node, 0); 263 base = of_iomap(np, 0);
103 if (!base) { 264 if (!base) {
104 dev_err(&ofdev->dev, "couldn't ioremap\n"); 265 dev_err(&ofdev->dev, "couldn't ioremap\n");
105 err = -ENOMEM; 266 return err;
106 goto exit_release_mem;
107 } 267 }
108 268
109 irq = irq_of_parse_and_map(np, 0); 269 irq = irq_of_parse_and_map(np, 0);
@@ -114,37 +274,27 @@ static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
114 } 274 }
115 275
116 dev = alloc_mscandev(); 276 dev = alloc_mscandev();
117 if (!dev) { 277 if (!dev)
118 err = -ENOMEM;
119 goto exit_dispose_irq; 278 goto exit_dispose_irq;
120 }
121 279
122 priv = netdev_priv(dev); 280 priv = netdev_priv(dev);
123 priv->reg_base = base; 281 priv->reg_base = base;
124 dev->irq = irq; 282 dev->irq = irq;
125 283
126 /* 284 clock_name = of_get_property(np, "fsl,mscan-clock-source", NULL);
127 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock 285
128 * (IP_CLK) can be selected as MSCAN clock source. According to 286 BUG_ON(!data);
129 * the MPC5200 user's manual, the oscillator clock is the better 287 priv->type = data->type;
130 * choice as it has less jitter. For this reason, it is selected 288 priv->can.clock.freq = data->get_clock(ofdev, clock_name,
131 * by default. 289 &mscan_clksrc);
132 */
133 clk_src = of_get_property(np, "fsl,mscan-clock-source", NULL);
134 if (clk_src && strcmp(clk_src, "ip") == 0)
135 clock_src = MSCAN_CLKSRC_BUS;
136 else
137 clock_src = MSCAN_CLKSRC_XTAL;
138 priv->can.clock.freq = mpc52xx_can_clock_freq(ofdev, clock_src);
139 if (!priv->can.clock.freq) { 290 if (!priv->can.clock.freq) {
140 dev_err(&ofdev->dev, "couldn't get MSCAN clock frequency\n"); 291 dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n");
141 err = -ENODEV;
142 goto exit_free_mscan; 292 goto exit_free_mscan;
143 } 293 }
144 294
145 SET_NETDEV_DEV(dev, &ofdev->dev); 295 SET_NETDEV_DEV(dev, &ofdev->dev);
146 296
147 err = register_mscandev(dev, clock_src); 297 err = register_mscandev(dev, mscan_clksrc);
148 if (err) { 298 if (err) {
149 dev_err(&ofdev->dev, "registering %s failed (err=%d)\n", 299 dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
150 DRV_NAME, err); 300 DRV_NAME, err);
@@ -164,7 +314,7 @@ exit_dispose_irq:
164 irq_dispose_mapping(irq); 314 irq_dispose_mapping(irq);
165exit_unmap_mem: 315exit_unmap_mem:
166 iounmap(base); 316 iounmap(base);
167exit_release_mem: 317
168 return err; 318 return err;
169} 319}
170 320
@@ -225,8 +375,20 @@ static int mpc5xxx_can_resume(struct of_device *ofdev)
225} 375}
226#endif 376#endif
227 377
378static struct mpc5xxx_can_data __devinitdata mpc5200_can_data = {
379 .type = MSCAN_TYPE_MPC5200,
380 .get_clock = mpc52xx_can_get_clock,
381};
382
383static struct mpc5xxx_can_data __devinitdata mpc5121_can_data = {
384 .type = MSCAN_TYPE_MPC5121,
385 .get_clock = mpc512x_can_get_clock,
386};
387
228static struct of_device_id __devinitdata mpc5xxx_can_table[] = { 388static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
229 {.compatible = "fsl,mpc5200-mscan"}, 389 { .compatible = "fsl,mpc5200-mscan", .data = &mpc5200_can_data, },
390 /* Note that only MPC5121 Rev. 2 (and later) is supported */
391 { .compatible = "fsl,mpc5121-mscan", .data = &mpc5121_can_data, },
230 {}, 392 {},
231}; 393};
232 394
@@ -255,5 +417,5 @@ static void __exit mpc5xxx_can_exit(void)
255module_exit(mpc5xxx_can_exit); 417module_exit(mpc5xxx_can_exit);
256 418
257MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); 419MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
258MODULE_DESCRIPTION("Freescale MPC5200 CAN driver"); 420MODULE_DESCRIPTION("Freescale MPC5xxx CAN driver");
259MODULE_LICENSE("GPL v2"); 421MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 07346f880ca6..6b7dd578d417 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -4,7 +4,7 @@
4 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>, 4 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
5 * Varma Electronics Oy 5 * Varma Electronics Oy
6 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com> 6 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
7 * Copytight (C) 2008-2009 Pengutronix <kernel@pengutronix.de> 7 * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the version 2 of the GNU General Public License 10 * it under the terms of the version 2 of the GNU General Public License
@@ -152,6 +152,12 @@ static int mscan_start(struct net_device *dev)
152 priv->shadow_canrier = 0; 152 priv->shadow_canrier = 0;
153 priv->flags = 0; 153 priv->flags = 0;
154 154
155 if (priv->type == MSCAN_TYPE_MPC5121) {
156 /* Clear pending bus-off condition */
157 if (in_8(&regs->canmisc) & MSCAN_BOHOLD)
158 out_8(&regs->canmisc, MSCAN_BOHOLD);
159 }
160
155 err = mscan_set_mode(dev, MSCAN_NORMAL_MODE); 161 err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
156 if (err) 162 if (err)
157 return err; 163 return err;
@@ -163,8 +169,29 @@ static int mscan_start(struct net_device *dev)
163 out_8(&regs->cantier, 0); 169 out_8(&regs->cantier, 0);
164 170
165 /* Enable receive interrupts. */ 171 /* Enable receive interrupts. */
166 out_8(&regs->canrier, MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE | 172 out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
167 MSCAN_RSTATE1 | MSCAN_RSTATE0 | MSCAN_TSTATE1 | MSCAN_TSTATE0); 173
174 return 0;
175}
176
177static int mscan_restart(struct net_device *dev)
178{
179 struct mscan_priv *priv = netdev_priv(dev);
180
181 if (priv->type == MSCAN_TYPE_MPC5121) {
182 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
183
184 priv->can.state = CAN_STATE_ERROR_ACTIVE;
185 WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
186 "bus-off state expected");
187 out_8(&regs->canmisc, MSCAN_BOHOLD);
188 /* Re-enable receive interrupts. */
189 out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
190 } else {
191 if (priv->can.state <= CAN_STATE_BUS_OFF)
192 mscan_set_mode(dev, MSCAN_INIT_MODE);
193 return mscan_start(dev);
194 }
168 195
169 return 0; 196 return 0;
170} 197}
@@ -177,8 +204,8 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
177 int i, rtr, buf_id; 204 int i, rtr, buf_id;
178 u32 can_id; 205 u32 can_id;
179 206
180 if (frame->can_dlc > 8) 207 if (can_dropped_invalid_skb(dev, skb))
181 return -EINVAL; 208 return NETDEV_TX_OK;
182 209
183 out_8(&regs->cantier, 0); 210 out_8(&regs->cantier, 0);
184 211
@@ -359,9 +386,12 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
359 * automatically. To avoid that we stop the chip doing 386 * automatically. To avoid that we stop the chip doing
360 * a light-weight stop (we are in irq-context). 387 * a light-weight stop (we are in irq-context).
361 */ 388 */
362 out_8(&regs->cantier, 0); 389 if (priv->type != MSCAN_TYPE_MPC5121) {
363 out_8(&regs->canrier, 0); 390 out_8(&regs->cantier, 0);
364 setbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ); 391 out_8(&regs->canrier, 0);
392 setbits8(&regs->canctl0,
393 MSCAN_SLPRQ | MSCAN_INITRQ);
394 }
365 can_bus_off(dev); 395 can_bus_off(dev);
366 break; 396 break;
367 default: 397 default:
@@ -491,9 +521,7 @@ static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
491 521
492 switch (mode) { 522 switch (mode) {
493 case CAN_MODE_START: 523 case CAN_MODE_START:
494 if (priv->can.state <= CAN_STATE_BUS_OFF) 524 ret = mscan_restart(dev);
495 mscan_set_mode(dev, MSCAN_INIT_MODE);
496 ret = mscan_start(dev);
497 if (ret) 525 if (ret)
498 break; 526 break;
499 if (netif_queue_stopped(dev)) 527 if (netif_queue_stopped(dev))
@@ -592,18 +620,21 @@ static const struct net_device_ops mscan_netdev_ops = {
592 .ndo_start_xmit = mscan_start_xmit, 620 .ndo_start_xmit = mscan_start_xmit,
593}; 621};
594 622
595int register_mscandev(struct net_device *dev, int clock_src) 623int register_mscandev(struct net_device *dev, int mscan_clksrc)
596{ 624{
597 struct mscan_priv *priv = netdev_priv(dev); 625 struct mscan_priv *priv = netdev_priv(dev);
598 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; 626 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
599 u8 ctl1; 627 u8 ctl1;
600 628
601 ctl1 = in_8(&regs->canctl1); 629 ctl1 = in_8(&regs->canctl1);
602 if (clock_src) 630 if (mscan_clksrc)
603 ctl1 |= MSCAN_CLKSRC; 631 ctl1 |= MSCAN_CLKSRC;
604 else 632 else
605 ctl1 &= ~MSCAN_CLKSRC; 633 ctl1 &= ~MSCAN_CLKSRC;
606 634
635 if (priv->type == MSCAN_TYPE_MPC5121)
636 ctl1 |= MSCAN_BORM; /* bus-off recovery upon request */
637
607 ctl1 |= MSCAN_CANE; 638 ctl1 |= MSCAN_CANE;
608 out_8(&regs->canctl1, ctl1); 639 out_8(&regs->canctl1, ctl1);
609 udelay(100); 640 udelay(100);
@@ -655,6 +686,7 @@ struct net_device *alloc_mscandev(void)
655 priv->can.bittiming_const = &mscan_bittiming_const; 686 priv->can.bittiming_const = &mscan_bittiming_const;
656 priv->can.do_set_bittiming = mscan_do_set_bittiming; 687 priv->can.do_set_bittiming = mscan_do_set_bittiming;
657 priv->can.do_set_mode = mscan_do_set_mode; 688 priv->can.do_set_mode = mscan_do_set_mode;
689 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
658 690
659 for (i = 0; i < TX_QUEUE_SIZE; i++) { 691 for (i = 0; i < TX_QUEUE_SIZE; i++) {
660 priv->tx_queue[i].id = i; 692 priv->tx_queue[i].id = i;
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index 00fc4aaf1ed8..4ff966473bc9 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -38,18 +38,20 @@
38#define MSCAN_CLKSRC 0x40 38#define MSCAN_CLKSRC 0x40
39#define MSCAN_LOOPB 0x20 39#define MSCAN_LOOPB 0x20
40#define MSCAN_LISTEN 0x10 40#define MSCAN_LISTEN 0x10
41#define MSCAN_BORM 0x08
41#define MSCAN_WUPM 0x04 42#define MSCAN_WUPM 0x04
42#define MSCAN_SLPAK 0x02 43#define MSCAN_SLPAK 0x02
43#define MSCAN_INITAK 0x01 44#define MSCAN_INITAK 0x01
44 45
45/* Use the MPC5200 MSCAN variant? */ 46/* Use the MPC5XXX MSCAN variant? */
46#ifdef CONFIG_PPC 47#ifdef CONFIG_PPC
47#define MSCAN_FOR_MPC5200 48#define MSCAN_FOR_MPC5XXX
48#endif 49#endif
49 50
50#ifdef MSCAN_FOR_MPC5200 51#ifdef MSCAN_FOR_MPC5XXX
51#define MSCAN_CLKSRC_BUS 0 52#define MSCAN_CLKSRC_BUS 0
52#define MSCAN_CLKSRC_XTAL MSCAN_CLKSRC 53#define MSCAN_CLKSRC_XTAL MSCAN_CLKSRC
54#define MSCAN_CLKSRC_IPS MSCAN_CLKSRC
53#else 55#else
54#define MSCAN_CLKSRC_BUS MSCAN_CLKSRC 56#define MSCAN_CLKSRC_BUS MSCAN_CLKSRC
55#define MSCAN_CLKSRC_XTAL 0 57#define MSCAN_CLKSRC_XTAL 0
@@ -136,7 +138,7 @@
136#define MSCAN_EFF_RTR_SHIFT 0 138#define MSCAN_EFF_RTR_SHIFT 0
137#define MSCAN_EFF_FLAGS 0x18 /* IDE + SRR */ 139#define MSCAN_EFF_FLAGS 0x18 /* IDE + SRR */
138 140
139#ifdef MSCAN_FOR_MPC5200 141#ifdef MSCAN_FOR_MPC5XXX
140#define _MSCAN_RESERVED_(n, num) u8 _res##n[num] 142#define _MSCAN_RESERVED_(n, num) u8 _res##n[num]
141#define _MSCAN_RESERVED_DSR_SIZE 2 143#define _MSCAN_RESERVED_DSR_SIZE 2
142#else 144#else
@@ -165,67 +167,66 @@ struct mscan_regs {
165 u8 cantbsel; /* + 0x14 0x0a */ 167 u8 cantbsel; /* + 0x14 0x0a */
166 u8 canidac; /* + 0x15 0x0b */ 168 u8 canidac; /* + 0x15 0x0b */
167 u8 reserved; /* + 0x16 0x0c */ 169 u8 reserved; /* + 0x16 0x0c */
168 _MSCAN_RESERVED_(6, 5); /* + 0x17 */ 170 _MSCAN_RESERVED_(6, 2); /* + 0x17 */
169#ifndef MSCAN_FOR_MPC5200 171 u8 canmisc; /* + 0x19 0x0d */
170 u8 canmisc; /* 0x0d */ 172 _MSCAN_RESERVED_(7, 2); /* + 0x1a */
171#endif
172 u8 canrxerr; /* + 0x1c 0x0e */ 173 u8 canrxerr; /* + 0x1c 0x0e */
173 u8 cantxerr; /* + 0x1d 0x0f */ 174 u8 cantxerr; /* + 0x1d 0x0f */
174 _MSCAN_RESERVED_(7, 2); /* + 0x1e */ 175 _MSCAN_RESERVED_(8, 2); /* + 0x1e */
175 u16 canidar1_0; /* + 0x20 0x10 */ 176 u16 canidar1_0; /* + 0x20 0x10 */
176 _MSCAN_RESERVED_(8, 2); /* + 0x22 */ 177 _MSCAN_RESERVED_(9, 2); /* + 0x22 */
177 u16 canidar3_2; /* + 0x24 0x12 */ 178 u16 canidar3_2; /* + 0x24 0x12 */
178 _MSCAN_RESERVED_(9, 2); /* + 0x26 */ 179 _MSCAN_RESERVED_(10, 2); /* + 0x26 */
179 u16 canidmr1_0; /* + 0x28 0x14 */ 180 u16 canidmr1_0; /* + 0x28 0x14 */
180 _MSCAN_RESERVED_(10, 2); /* + 0x2a */ 181 _MSCAN_RESERVED_(11, 2); /* + 0x2a */
181 u16 canidmr3_2; /* + 0x2c 0x16 */ 182 u16 canidmr3_2; /* + 0x2c 0x16 */
182 _MSCAN_RESERVED_(11, 2); /* + 0x2e */ 183 _MSCAN_RESERVED_(12, 2); /* + 0x2e */
183 u16 canidar5_4; /* + 0x30 0x18 */ 184 u16 canidar5_4; /* + 0x30 0x18 */
184 _MSCAN_RESERVED_(12, 2); /* + 0x32 */ 185 _MSCAN_RESERVED_(13, 2); /* + 0x32 */
185 u16 canidar7_6; /* + 0x34 0x1a */ 186 u16 canidar7_6; /* + 0x34 0x1a */
186 _MSCAN_RESERVED_(13, 2); /* + 0x36 */ 187 _MSCAN_RESERVED_(14, 2); /* + 0x36 */
187 u16 canidmr5_4; /* + 0x38 0x1c */ 188 u16 canidmr5_4; /* + 0x38 0x1c */
188 _MSCAN_RESERVED_(14, 2); /* + 0x3a */ 189 _MSCAN_RESERVED_(15, 2); /* + 0x3a */
189 u16 canidmr7_6; /* + 0x3c 0x1e */ 190 u16 canidmr7_6; /* + 0x3c 0x1e */
190 _MSCAN_RESERVED_(15, 2); /* + 0x3e */ 191 _MSCAN_RESERVED_(16, 2); /* + 0x3e */
191 struct { 192 struct {
192 u16 idr1_0; /* + 0x40 0x20 */ 193 u16 idr1_0; /* + 0x40 0x20 */
193 _MSCAN_RESERVED_(16, 2); /* + 0x42 */ 194 _MSCAN_RESERVED_(17, 2); /* + 0x42 */
194 u16 idr3_2; /* + 0x44 0x22 */ 195 u16 idr3_2; /* + 0x44 0x22 */
195 _MSCAN_RESERVED_(17, 2); /* + 0x46 */ 196 _MSCAN_RESERVED_(18, 2); /* + 0x46 */
196 u16 dsr1_0; /* + 0x48 0x24 */ 197 u16 dsr1_0; /* + 0x48 0x24 */
197 _MSCAN_RESERVED_(18, 2); /* + 0x4a */ 198 _MSCAN_RESERVED_(19, 2); /* + 0x4a */
198 u16 dsr3_2; /* + 0x4c 0x26 */ 199 u16 dsr3_2; /* + 0x4c 0x26 */
199 _MSCAN_RESERVED_(19, 2); /* + 0x4e */ 200 _MSCAN_RESERVED_(20, 2); /* + 0x4e */
200 u16 dsr5_4; /* + 0x50 0x28 */ 201 u16 dsr5_4; /* + 0x50 0x28 */
201 _MSCAN_RESERVED_(20, 2); /* + 0x52 */ 202 _MSCAN_RESERVED_(21, 2); /* + 0x52 */
202 u16 dsr7_6; /* + 0x54 0x2a */ 203 u16 dsr7_6; /* + 0x54 0x2a */
203 _MSCAN_RESERVED_(21, 2); /* + 0x56 */ 204 _MSCAN_RESERVED_(22, 2); /* + 0x56 */
204 u8 dlr; /* + 0x58 0x2c */ 205 u8 dlr; /* + 0x58 0x2c */
205 u8:8; /* + 0x59 0x2d */ 206 u8 reserved; /* + 0x59 0x2d */
206 _MSCAN_RESERVED_(22, 2); /* + 0x5a */ 207 _MSCAN_RESERVED_(23, 2); /* + 0x5a */
207 u16 time; /* + 0x5c 0x2e */ 208 u16 time; /* + 0x5c 0x2e */
208 } rx; 209 } rx;
209 _MSCAN_RESERVED_(23, 2); /* + 0x5e */ 210 _MSCAN_RESERVED_(24, 2); /* + 0x5e */
210 struct { 211 struct {
211 u16 idr1_0; /* + 0x60 0x30 */ 212 u16 idr1_0; /* + 0x60 0x30 */
212 _MSCAN_RESERVED_(24, 2); /* + 0x62 */ 213 _MSCAN_RESERVED_(25, 2); /* + 0x62 */
213 u16 idr3_2; /* + 0x64 0x32 */ 214 u16 idr3_2; /* + 0x64 0x32 */
214 _MSCAN_RESERVED_(25, 2); /* + 0x66 */ 215 _MSCAN_RESERVED_(26, 2); /* + 0x66 */
215 u16 dsr1_0; /* + 0x68 0x34 */ 216 u16 dsr1_0; /* + 0x68 0x34 */
216 _MSCAN_RESERVED_(26, 2); /* + 0x6a */ 217 _MSCAN_RESERVED_(27, 2); /* + 0x6a */
217 u16 dsr3_2; /* + 0x6c 0x36 */ 218 u16 dsr3_2; /* + 0x6c 0x36 */
218 _MSCAN_RESERVED_(27, 2); /* + 0x6e */ 219 _MSCAN_RESERVED_(28, 2); /* + 0x6e */
219 u16 dsr5_4; /* + 0x70 0x38 */ 220 u16 dsr5_4; /* + 0x70 0x38 */
220 _MSCAN_RESERVED_(28, 2); /* + 0x72 */ 221 _MSCAN_RESERVED_(29, 2); /* + 0x72 */
221 u16 dsr7_6; /* + 0x74 0x3a */ 222 u16 dsr7_6; /* + 0x74 0x3a */
222 _MSCAN_RESERVED_(29, 2); /* + 0x76 */ 223 _MSCAN_RESERVED_(30, 2); /* + 0x76 */
223 u8 dlr; /* + 0x78 0x3c */ 224 u8 dlr; /* + 0x78 0x3c */
224 u8 tbpr; /* + 0x79 0x3d */ 225 u8 tbpr; /* + 0x79 0x3d */
225 _MSCAN_RESERVED_(30, 2); /* + 0x7a */ 226 _MSCAN_RESERVED_(31, 2); /* + 0x7a */
226 u16 time; /* + 0x7c 0x3e */ 227 u16 time; /* + 0x7c 0x3e */
227 } tx; 228 } tx;
228 _MSCAN_RESERVED_(31, 2); /* + 0x7e */ 229 _MSCAN_RESERVED_(32, 2); /* + 0x7e */
229} __attribute__ ((packed)); 230} __attribute__ ((packed));
230 231
231#undef _MSCAN_RESERVED_ 232#undef _MSCAN_RESERVED_
@@ -237,6 +238,15 @@ struct mscan_regs {
237#define MSCAN_POWEROFF_MODE (MSCAN_CSWAI | MSCAN_SLPRQ) 238#define MSCAN_POWEROFF_MODE (MSCAN_CSWAI | MSCAN_SLPRQ)
238#define MSCAN_SET_MODE_RETRIES 255 239#define MSCAN_SET_MODE_RETRIES 255
239#define MSCAN_ECHO_SKB_MAX 3 240#define MSCAN_ECHO_SKB_MAX 3
241#define MSCAN_RX_INTS_ENABLE (MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE | \
242 MSCAN_RSTATE1 | MSCAN_RSTATE0 | \
243 MSCAN_TSTATE1 | MSCAN_TSTATE0)
244
245/* MSCAN type variants */
246enum {
247 MSCAN_TYPE_MPC5200,
248 MSCAN_TYPE_MPC5121
249};
240 250
241#define BTR0_BRP_MASK 0x3f 251#define BTR0_BRP_MASK 0x3f
242#define BTR0_SJW_SHIFT 6 252#define BTR0_SJW_SHIFT 6
@@ -270,6 +280,7 @@ struct tx_queue_entry {
270 280
271struct mscan_priv { 281struct mscan_priv {
272 struct can_priv can; /* must be the first member */ 282 struct can_priv can; /* must be the first member */
283 unsigned int type; /* MSCAN type variants */
273 long open_time; 284 long open_time;
274 unsigned long flags; 285 unsigned long flags;
275 void __iomem *reg_base; /* ioremap'ed address to registers */ 286 void __iomem *reg_base; /* ioremap'ed address to registers */
@@ -285,12 +296,7 @@ struct mscan_priv {
285}; 296};
286 297
287extern struct net_device *alloc_mscandev(void); 298extern struct net_device *alloc_mscandev(void);
288/* 299extern int register_mscandev(struct net_device *dev, int mscan_clksrc);
289 * clock_src:
290 * 1 = The MSCAN clock source is the onchip Bus Clock.
291 * 0 = The MSCAN clock source is the chip Oscillator Clock.
292 */
293extern int register_mscandev(struct net_device *dev, int clock_src);
294extern void unregister_mscandev(struct net_device *dev); 300extern void unregister_mscandev(struct net_device *dev);
295 301
296#endif /* __MSCAN_H__ */ 302#endif /* __MSCAN_H__ */
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 4c674927f247..9e277d64a318 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -44,4 +44,16 @@ config CAN_KVASER_PCI
44 This driver is for the the PCIcanx and PCIcan cards (1, 2 or 44 This driver is for the the PCIcanx and PCIcan cards (1, 2 or
45 4 channel) from Kvaser (http://www.kvaser.com). 45 4 channel) from Kvaser (http://www.kvaser.com).
46 46
47config CAN_PLX_PCI
48 tristate "PLX90xx PCI-bridge based Cards"
49 depends on PCI
50 ---help---
51 This driver is for CAN interface cards based on
52 the PLX90xx PCI bridge.
53 Driver supports now:
54 - Adlink PCI-7841/cPCI-7841 card (http://www.adlinktech.com/)
55 - Adlink PCI-7841/cPCI-7841 SE card
56 - Marathon CAN-bus-PCI card (http://www.marathon.ru/)
57 - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
58
47endif 59endif
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index 9d245ac03965..ce924553995d 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -8,5 +8,6 @@ obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o
8obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o 8obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
9obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o 9obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
10obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o 10obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
11obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
11 12
12ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG 13ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index fd04789d3370..87300606abb9 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -102,7 +102,7 @@ struct ems_pci_card {
102 102
103#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */ 103#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */
104 104
105static struct pci_device_id ems_pci_tbl[] = { 105static DEFINE_PCI_DEVICE_TABLE(ems_pci_tbl) = {
106 /* CPC-PCI v1 */ 106 /* CPC-PCI v1 */
107 {PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,}, 107 {PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,},
108 /* CPC-PCI v2 */ 108 /* CPC-PCI v2 */
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 7dd7769b9713..441e776a7f59 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -109,7 +109,7 @@ struct kvaser_pci {
109#define KVASER_PCI_VENDOR_ID2 0x1a07 /* the PCI device and vendor IDs */ 109#define KVASER_PCI_VENDOR_ID2 0x1a07 /* the PCI device and vendor IDs */
110#define KVASER_PCI_DEVICE_ID2 0x0008 110#define KVASER_PCI_DEVICE_ID2 0x0008
111 111
112static struct pci_device_id kvaser_pci_tbl[] = { 112static DEFINE_PCI_DEVICE_TABLE(kvaser_pci_tbl) = {
113 {KVASER_PCI_VENDOR_ID1, KVASER_PCI_DEVICE_ID1, PCI_ANY_ID, PCI_ANY_ID,}, 113 {KVASER_PCI_VENDOR_ID1, KVASER_PCI_DEVICE_ID1, PCI_ANY_ID, PCI_ANY_ID,},
114 {KVASER_PCI_VENDOR_ID2, KVASER_PCI_DEVICE_ID2, PCI_ANY_ID, PCI_ANY_ID,}, 114 {KVASER_PCI_VENDOR_ID2, KVASER_PCI_DEVICE_ID2, PCI_ANY_ID, PCI_ANY_ID,},
115 { 0,} 115 { 0,}
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
new file mode 100644
index 000000000000..6b46a6395f80
--- /dev/null
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -0,0 +1,472 @@
1/*
2 * Copyright (C) 2008-2010 Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>
3 *
4 * Derived from the ems_pci.c driver:
5 * Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com>
6 * Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com>
7 * Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the version 2 of the GNU General Public License
11 * as published by the Free Software Foundation
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/netdevice.h>
27#include <linux/delay.h>
28#include <linux/pci.h>
29#include <linux/can.h>
30#include <linux/can/dev.h>
31#include <linux/io.h>
32
33#include "sja1000.h"
34
35#define DRV_NAME "sja1000_plx_pci"
36
37MODULE_AUTHOR("Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>");
38MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with "
39 "the SJA1000 chips");
40MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
41 "Adlink PCI-7841/cPCI-7841 SE, "
42 "Marathon CAN-bus-PCI, "
43 "TEWS TECHNOLOGIES TPMC810");
44MODULE_LICENSE("GPL v2");
45
46#define PLX_PCI_MAX_CHAN 2
47
48struct plx_pci_card {
49 int channels; /* detected channels count */
50 struct net_device *net_dev[PLX_PCI_MAX_CHAN];
51 void __iomem *conf_addr;
52};
53
54#define PLX_PCI_CAN_CLOCK (16000000 / 2)
55
56/* PLX90xx registers */
57#define PLX_INTCSR 0x4c /* Interrupt Control/Status */
58#define PLX_CNTRL 0x50 /* User I/O, Direct Slave Response,
59 * Serial EEPROM, and Initialization
60 * Control register
61 */
62
63#define PLX_LINT1_EN 0x1 /* Local interrupt 1 enable */
64#define PLX_LINT2_EN (1 << 3) /* Local interrupt 2 enable */
65#define PLX_PCI_INT_EN (1 << 6) /* PCI Interrupt Enable */
66#define PLX_PCI_RESET (1 << 30) /* PCI Adapter Software Reset */
67
68/*
69 * The board configuration is probably following:
70 * RX1 is connected to ground.
71 * TX1 is not connected.
72 * CLKO is not connected.
73 * Setting the OCR register to 0xDA is a good idea.
74 * This means normal output mode, push-pull and the correct polarity.
75 */
76#define PLX_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL)
77
78/*
79 * In the CDR register, you should set CBP to 1.
80 * You will probably also want to set the clock divider value to 7
81 * (meaning direct oscillator output) because the second SJA1000 chip
82 * is driven by the first one CLKOUT output.
83 */
84#define PLX_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK)
85
86/* SJA1000 Control Register in the BasicCAN Mode */
87#define REG_CR 0x00
88
89/* States of some SJA1000 registers after hardware reset in the BasicCAN mode*/
90#define REG_CR_BASICCAN_INITIAL 0x21
91#define REG_CR_BASICCAN_INITIAL_MASK 0xa1
92#define REG_SR_BASICCAN_INITIAL 0x0c
93#define REG_IR_BASICCAN_INITIAL 0xe0
94
95/* States of some SJA1000 registers after hardware reset in the PeliCAN mode*/
96#define REG_MOD_PELICAN_INITIAL 0x01
97#define REG_SR_PELICAN_INITIAL 0x3c
98#define REG_IR_PELICAN_INITIAL 0x00
99
100#define ADLINK_PCI_VENDOR_ID 0x144A
101#define ADLINK_PCI_DEVICE_ID 0x7841
102
103#define MARATHON_PCI_DEVICE_ID 0x2715
104
105#define TEWS_PCI_VENDOR_ID 0x1498
106#define TEWS_PCI_DEVICE_ID_TMPC810 0x032A
107
108static void plx_pci_reset_common(struct pci_dev *pdev);
109static void plx_pci_reset_marathon(struct pci_dev *pdev);
110
111struct plx_pci_channel_map {
112 u32 bar;
113 u32 offset;
114 u32 size; /* 0x00 - auto, e.g. length of entire bar */
115};
116
117struct plx_pci_card_info {
118 const char *name;
119 int channel_count;
120 u32 can_clock;
121 u8 ocr; /* output control register */
122 u8 cdr; /* clock divider register */
123
124 /* Parameters for mapping local configuration space */
125 struct plx_pci_channel_map conf_map;
126
127 /* Parameters for mapping the SJA1000 chips */
128 struct plx_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CHAN];
129
130 /* Pointer to device-dependent reset function */
131 void (*reset_func)(struct pci_dev *pdev);
132};
133
134static struct plx_pci_card_info plx_pci_card_info_adlink __devinitdata = {
135 "Adlink PCI-7841/cPCI-7841", 2,
136 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
137 {1, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
138 &plx_pci_reset_common
139 /* based on PLX9052 */
140};
141
142static struct plx_pci_card_info plx_pci_card_info_adlink_se __devinitdata = {
143 "Adlink PCI-7841/cPCI-7841 SE", 2,
144 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
145 {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
146 &plx_pci_reset_common
147 /* based on PLX9052 */
148};
149
150static struct plx_pci_card_info plx_pci_card_info_marathon __devinitdata = {
151 "Marathon CAN-bus-PCI", 2,
152 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
153 {0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} },
154 &plx_pci_reset_marathon
155 /* based on PLX9052 */
156};
157
158static struct plx_pci_card_info plx_pci_card_info_tews __devinitdata = {
159 "TEWS TECHNOLOGIES TPMC810", 2,
160 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
161 {0, 0x00, 0x00}, { {2, 0x000, 0x80}, {2, 0x100, 0x80} },
162 &plx_pci_reset_common
163 /* based on PLX9030 */
164};
165
166static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
167 {
168 /* Adlink PCI-7841/cPCI-7841 */
169 ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID,
170 PCI_ANY_ID, PCI_ANY_ID,
171 PCI_CLASS_NETWORK_OTHER << 8, ~0,
172 (kernel_ulong_t)&plx_pci_card_info_adlink
173 },
174 {
175 /* Adlink PCI-7841/cPCI-7841 SE */
176 ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID,
177 PCI_ANY_ID, PCI_ANY_ID,
178 PCI_CLASS_COMMUNICATION_OTHER << 8, ~0,
179 (kernel_ulong_t)&plx_pci_card_info_adlink_se
180 },
181 {
182 /* Marathon CAN-bus-PCI card */
183 PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID,
184 PCI_ANY_ID, PCI_ANY_ID,
185 0, 0,
186 (kernel_ulong_t)&plx_pci_card_info_marathon
187 },
188 {
189 /* TEWS TECHNOLOGIES TPMC810 card */
190 TEWS_PCI_VENDOR_ID, TEWS_PCI_DEVICE_ID_TMPC810,
191 PCI_ANY_ID, PCI_ANY_ID,
192 0, 0,
193 (kernel_ulong_t)&plx_pci_card_info_tews
194 },
195 { 0,}
196};
197MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
198
199static u8 plx_pci_read_reg(const struct sja1000_priv *priv, int port)
200{
201 return ioread8(priv->reg_base + port);
202}
203
204static void plx_pci_write_reg(const struct sja1000_priv *priv, int port, u8 val)
205{
206 iowrite8(val, priv->reg_base + port);
207}
208
209/*
210 * Check if a CAN controller is present at the specified location
211 * by trying to switch 'em from the Basic mode into the PeliCAN mode.
212 * Also check states of some registers in reset mode.
213 */
214static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
215{
216 int flag = 0;
217
218 /*
219 * Check registers after hardware reset (the Basic mode)
220 * See states on p. 10 of the Datasheet.
221 */
222 if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==
223 REG_CR_BASICCAN_INITIAL &&
224 (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) &&
225 (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL))
226 flag = 1;
227
228 /* Bring the SJA1000 into the PeliCAN mode*/
229 priv->write_reg(priv, REG_CDR, CDR_PELICAN);
230
231 /*
232 * Check registers after reset in the PeliCAN mode.
233 * See states on p. 23 of the Datasheet.
234 */
235 if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL &&
236 priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL &&
237 priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL)
238 return flag;
239
240 return 0;
241}
242
243/*
244 * PLX90xx software reset
245 * Also LRESET# asserts and brings to reset device on the Local Bus (if wired).
246 * For most cards it's enough for reset the SJA1000 chips.
247 */
248static void plx_pci_reset_common(struct pci_dev *pdev)
249{
250 struct plx_pci_card *card = pci_get_drvdata(pdev);
251 u32 cntrl;
252
253 cntrl = ioread32(card->conf_addr + PLX_CNTRL);
254 cntrl |= PLX_PCI_RESET;
255 iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
256 udelay(100);
257 cntrl ^= PLX_PCI_RESET;
258 iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
259};
260
261/* Special reset function for Marathon card */
262static void plx_pci_reset_marathon(struct pci_dev *pdev)
263{
264 void __iomem *reset_addr;
265 int i;
266 int reset_bar[2] = {3, 5};
267
268 plx_pci_reset_common(pdev);
269
270 for (i = 0; i < 2; i++) {
271 reset_addr = pci_iomap(pdev, reset_bar[i], 0);
272 if (!reset_addr) {
273 dev_err(&pdev->dev, "Failed to remap reset "
274 "space %d (BAR%d)\n", i, reset_bar[i]);
275 } else {
276 /* reset the SJA1000 chip */
277 iowrite8(0x1, reset_addr);
278 udelay(100);
279 pci_iounmap(pdev, reset_addr);
280 }
281 }
282}
283
284static void plx_pci_del_card(struct pci_dev *pdev)
285{
286 struct plx_pci_card *card = pci_get_drvdata(pdev);
287 struct net_device *dev;
288 struct sja1000_priv *priv;
289 int i = 0;
290
291 for (i = 0; i < card->channels; i++) {
292 dev = card->net_dev[i];
293 if (!dev)
294 continue;
295
296 dev_info(&pdev->dev, "Removing %s\n", dev->name);
297 unregister_sja1000dev(dev);
298 priv = netdev_priv(dev);
299 if (priv->reg_base)
300 pci_iounmap(pdev, priv->reg_base);
301 free_sja1000dev(dev);
302 }
303
304 plx_pci_reset_common(pdev);
305
306 /*
307 * Disable interrupts from PCI-card (PLX90xx) and disable Local_1,
308 * Local_2 interrupts
309 */
310 iowrite32(0x0, card->conf_addr + PLX_INTCSR);
311
312 if (card->conf_addr)
313 pci_iounmap(pdev, card->conf_addr);
314
315 kfree(card);
316
317 pci_disable_device(pdev);
318 pci_set_drvdata(pdev, NULL);
319}
320
321/*
322 * Probe PLX90xx based device for the SJA1000 chips and register each
323 * available CAN channel to SJA1000 Socket-CAN subsystem.
324 */
325static int __devinit plx_pci_add_card(struct pci_dev *pdev,
326 const struct pci_device_id *ent)
327{
328 struct sja1000_priv *priv;
329 struct net_device *dev;
330 struct plx_pci_card *card;
331 struct plx_pci_card_info *ci;
332 int err, i;
333 u32 val;
334 void __iomem *addr;
335
336 ci = (struct plx_pci_card_info *)ent->driver_data;
337
338 if (pci_enable_device(pdev) < 0) {
339 dev_err(&pdev->dev, "Failed to enable PCI device\n");
340 return -ENODEV;
341 }
342
343 dev_info(&pdev->dev, "Detected \"%s\" card at slot #%i\n",
344 ci->name, PCI_SLOT(pdev->devfn));
345
346 /* Allocate card structures to hold addresses, ... */
347 card = kzalloc(sizeof(*card), GFP_KERNEL);
348 if (!card) {
349 dev_err(&pdev->dev, "Unable to allocate memory\n");
350 pci_disable_device(pdev);
351 return -ENOMEM;
352 }
353
354 pci_set_drvdata(pdev, card);
355
356 card->channels = 0;
357
358 /* Remap PLX90xx configuration space */
359 addr = pci_iomap(pdev, ci->conf_map.bar, ci->conf_map.size);
360 if (!addr) {
361 err = -ENOMEM;
362 dev_err(&pdev->dev, "Failed to remap configuration space "
363 "(BAR%d)\n", ci->conf_map.bar);
364 goto failure_cleanup;
365 }
366 card->conf_addr = addr + ci->conf_map.offset;
367
368 ci->reset_func(pdev);
369
370 /* Detect available channels */
371 for (i = 0; i < ci->channel_count; i++) {
372 struct plx_pci_channel_map *cm = &ci->chan_map_tbl[i];
373
374 dev = alloc_sja1000dev(0);
375 if (!dev) {
376 err = -ENOMEM;
377 goto failure_cleanup;
378 }
379
380 card->net_dev[i] = dev;
381 priv = netdev_priv(dev);
382 priv->priv = card;
383 priv->irq_flags = IRQF_SHARED;
384
385 dev->irq = pdev->irq;
386
387 /*
388 * Remap IO space of the SJA1000 chips
389 * This is device-dependent mapping
390 */
391 addr = pci_iomap(pdev, cm->bar, cm->size);
392 if (!addr) {
393 err = -ENOMEM;
394 dev_err(&pdev->dev, "Failed to remap BAR%d\n", cm->bar);
395 goto failure_cleanup;
396 }
397
398 priv->reg_base = addr + cm->offset;
399 priv->read_reg = plx_pci_read_reg;
400 priv->write_reg = plx_pci_write_reg;
401
402 /* Check if channel is present */
403 if (plx_pci_check_sja1000(priv)) {
404 priv->can.clock.freq = ci->can_clock;
405 priv->ocr = ci->ocr;
406 priv->cdr = ci->cdr;
407
408 SET_NETDEV_DEV(dev, &pdev->dev);
409
410 /* Register SJA1000 device */
411 err = register_sja1000dev(dev);
412 if (err) {
413 dev_err(&pdev->dev, "Registering device failed "
414 "(err=%d)\n", err);
415 free_sja1000dev(dev);
416 goto failure_cleanup;
417 }
418
419 card->channels++;
420
421 dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d "
422 "registered as %s\n", i + 1, priv->reg_base,
423 dev->irq, dev->name);
424 } else {
425 dev_err(&pdev->dev, "Channel #%d not detected\n",
426 i + 1);
427 free_sja1000dev(dev);
428 }
429 }
430
431 if (!card->channels) {
432 err = -ENODEV;
433 goto failure_cleanup;
434 }
435
436 /*
437 * Enable interrupts from PCI-card (PLX90xx) and enable Local_1,
438 * Local_2 interrupts from the SJA1000 chips
439 */
440 val = ioread32(card->conf_addr + PLX_INTCSR);
441 val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN;
442 iowrite32(val, card->conf_addr + PLX_INTCSR);
443
444 return 0;
445
446failure_cleanup:
447 dev_err(&pdev->dev, "Error: %d. Cleaning Up.\n", err);
448
449 plx_pci_del_card(pdev);
450
451 return err;
452}
453
454static struct pci_driver plx_pci_driver = {
455 .name = DRV_NAME,
456 .id_table = plx_pci_tbl,
457 .probe = plx_pci_add_card,
458 .remove = plx_pci_del_card,
459};
460
461static int __init plx_pci_init(void)
462{
463 return pci_register_driver(&plx_pci_driver);
464}
465
466static void __exit plx_pci_exit(void)
467{
468 pci_unregister_driver(&plx_pci_driver);
469}
470
471module_init(plx_pci_init);
472module_exit(plx_pci_exit);
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 542a4f7255b4..145b1a731a53 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -130,8 +130,12 @@ static void set_normal_mode(struct net_device *dev)
130 /* check reset bit */ 130 /* check reset bit */
131 if ((status & MOD_RM) == 0) { 131 if ((status & MOD_RM) == 0) {
132 priv->can.state = CAN_STATE_ERROR_ACTIVE; 132 priv->can.state = CAN_STATE_ERROR_ACTIVE;
133 /* enable all interrupts */ 133 /* enable interrupts */
134 priv->write_reg(priv, REG_IER, IRQ_ALL); 134 if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
135 priv->write_reg(priv, REG_IER, IRQ_ALL);
136 else
137 priv->write_reg(priv, REG_IER,
138 IRQ_ALL & ~IRQ_BEI);
135 return; 139 return;
136 } 140 }
137 141
@@ -203,6 +207,17 @@ static int sja1000_set_bittiming(struct net_device *dev)
203 return 0; 207 return 0;
204} 208}
205 209
210static int sja1000_get_berr_counter(const struct net_device *dev,
211 struct can_berr_counter *bec)
212{
213 struct sja1000_priv *priv = netdev_priv(dev);
214
215 bec->txerr = priv->read_reg(priv, REG_TXERR);
216 bec->rxerr = priv->read_reg(priv, REG_RXERR);
217
218 return 0;
219}
220
206/* 221/*
207 * initialize SJA1000 chip: 222 * initialize SJA1000 chip:
208 * - reset chip 223 * - reset chip
@@ -249,6 +264,9 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
249 uint8_t dreg; 264 uint8_t dreg;
250 int i; 265 int i;
251 266
267 if (can_dropped_invalid_skb(dev, skb))
268 return NETDEV_TX_OK;
269
252 netif_stop_queue(dev); 270 netif_stop_queue(dev);
253 271
254 fi = dlc = cf->can_dlc; 272 fi = dlc = cf->can_dlc;
@@ -434,6 +452,8 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
434 CAN_ERR_CRTL_TX_PASSIVE : 452 CAN_ERR_CRTL_TX_PASSIVE :
435 CAN_ERR_CRTL_RX_PASSIVE; 453 CAN_ERR_CRTL_RX_PASSIVE;
436 } 454 }
455 cf->data[6] = txerr;
456 cf->data[7] = rxerr;
437 } 457 }
438 458
439 priv->can.state = state; 459 priv->can.state = state;
@@ -564,6 +584,9 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
564 priv->can.bittiming_const = &sja1000_bittiming_const; 584 priv->can.bittiming_const = &sja1000_bittiming_const;
565 priv->can.do_set_bittiming = sja1000_set_bittiming; 585 priv->can.do_set_bittiming = sja1000_set_bittiming;
566 priv->can.do_set_mode = sja1000_set_mode; 586 priv->can.do_set_mode = sja1000_set_mode;
587 priv->can.do_get_berr_counter = sja1000_get_berr_counter;
588 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
589 CAN_CTRLMODE_BERR_REPORTING;
567 590
568 if (sizeof_priv) 591 if (sizeof_priv)
569 priv->priv = (void *)priv + sizeof(struct sja1000_priv); 592 priv->priv = (void *)priv + sizeof(struct sja1000_priv);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 5c993c2da528..0c3d2ba0d178 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -28,9 +28,11 @@
28 * .mbx_offset = 0x2000, 28 * .mbx_offset = 0x2000,
29 * .int_line = 0, 29 * .int_line = 0,
30 * .revision = 1, 30 * .revision = 1,
31 * .transceiver_switch = hecc_phy_control,
31 * }; 32 * };
32 * 33 *
33 * Please see include/can/platform/ti_hecc.h for description of above fields 34 * Please see include/linux/can/platform/ti_hecc.h for description of
35 * above fields.
34 * 36 *
35 */ 37 */
36 38
@@ -220,6 +222,7 @@ struct ti_hecc_priv {
220 u32 tx_head; 222 u32 tx_head;
221 u32 tx_tail; 223 u32 tx_tail;
222 u32 rx_next; 224 u32 rx_next;
225 void (*transceiver_switch)(int);
223}; 226};
224 227
225static inline int get_tx_head_mb(struct ti_hecc_priv *priv) 228static inline int get_tx_head_mb(struct ti_hecc_priv *priv)
@@ -317,6 +320,13 @@ static int ti_hecc_set_btc(struct ti_hecc_priv *priv)
317 return 0; 320 return 0;
318} 321}
319 322
323static void ti_hecc_transceiver_switch(const struct ti_hecc_priv *priv,
324 int on)
325{
326 if (priv->transceiver_switch)
327 priv->transceiver_switch(on);
328}
329
320static void ti_hecc_reset(struct net_device *ndev) 330static void ti_hecc_reset(struct net_device *ndev)
321{ 331{
322 u32 cnt; 332 u32 cnt;
@@ -477,6 +487,9 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
477 u32 mbxno, mbx_mask, data; 487 u32 mbxno, mbx_mask, data;
478 unsigned long flags; 488 unsigned long flags;
479 489
490 if (can_dropped_invalid_skb(ndev, skb))
491 return NETDEV_TX_OK;
492
480 mbxno = get_tx_head_mb(priv); 493 mbxno = get_tx_head_mb(priv);
481 mbx_mask = BIT(mbxno); 494 mbx_mask = BIT(mbxno);
482 spin_lock_irqsave(&priv->mbx_lock, flags); 495 spin_lock_irqsave(&priv->mbx_lock, flags);
@@ -491,7 +504,6 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
491 spin_unlock_irqrestore(&priv->mbx_lock, flags); 504 spin_unlock_irqrestore(&priv->mbx_lock, flags);
492 505
493 /* Prepare mailbox for transmission */ 506 /* Prepare mailbox for transmission */
494 data = min_t(u8, cf->can_dlc, 8);
495 if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */ 507 if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
496 data |= HECC_CANMCF_RTR; 508 data |= HECC_CANMCF_RTR;
497 data |= get_tx_head_prio(priv) << 8; 509 data |= get_tx_head_prio(priv) << 8;
@@ -816,15 +828,17 @@ static int ti_hecc_open(struct net_device *ndev)
816 return err; 828 return err;
817 } 829 }
818 830
831 ti_hecc_transceiver_switch(priv, 1);
832
819 /* Open common can device */ 833 /* Open common can device */
820 err = open_candev(ndev); 834 err = open_candev(ndev);
821 if (err) { 835 if (err) {
822 dev_err(ndev->dev.parent, "open_candev() failed %d\n", err); 836 dev_err(ndev->dev.parent, "open_candev() failed %d\n", err);
837 ti_hecc_transceiver_switch(priv, 0);
823 free_irq(ndev->irq, ndev); 838 free_irq(ndev->irq, ndev);
824 return err; 839 return err;
825 } 840 }
826 841
827 clk_enable(priv->clk);
828 ti_hecc_start(ndev); 842 ti_hecc_start(ndev);
829 napi_enable(&priv->napi); 843 napi_enable(&priv->napi);
830 netif_start_queue(ndev); 844 netif_start_queue(ndev);
@@ -840,8 +854,8 @@ static int ti_hecc_close(struct net_device *ndev)
840 napi_disable(&priv->napi); 854 napi_disable(&priv->napi);
841 ti_hecc_stop(ndev); 855 ti_hecc_stop(ndev);
842 free_irq(ndev->irq, ndev); 856 free_irq(ndev->irq, ndev);
843 clk_disable(priv->clk);
844 close_candev(ndev); 857 close_candev(ndev);
858 ti_hecc_transceiver_switch(priv, 0);
845 859
846 return 0; 860 return 0;
847} 861}
@@ -903,10 +917,12 @@ static int ti_hecc_probe(struct platform_device *pdev)
903 priv->hecc_ram_offset = pdata->hecc_ram_offset; 917 priv->hecc_ram_offset = pdata->hecc_ram_offset;
904 priv->mbx_offset = pdata->mbx_offset; 918 priv->mbx_offset = pdata->mbx_offset;
905 priv->int_line = pdata->int_line; 919 priv->int_line = pdata->int_line;
920 priv->transceiver_switch = pdata->transceiver_switch;
906 921
907 priv->can.bittiming_const = &ti_hecc_bittiming_const; 922 priv->can.bittiming_const = &ti_hecc_bittiming_const;
908 priv->can.do_set_mode = ti_hecc_do_set_mode; 923 priv->can.do_set_mode = ti_hecc_do_set_mode;
909 priv->can.do_get_state = ti_hecc_get_state; 924 priv->can.do_get_state = ti_hecc_get_state;
925 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
910 926
911 ndev->irq = irq->start; 927 ndev->irq = irq->start;
912 ndev->flags |= IFF_ECHO; 928 ndev->flags |= IFF_ECHO;
@@ -925,6 +941,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
925 netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll, 941 netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
926 HECC_DEF_NAPI_WEIGHT); 942 HECC_DEF_NAPI_WEIGHT);
927 943
944 clk_enable(priv->clk);
928 err = register_candev(ndev); 945 err = register_candev(ndev);
929 if (err) { 946 if (err) {
930 dev_err(&pdev->dev, "register_candev() failed\n"); 947 dev_err(&pdev->dev, "register_candev() failed\n");
@@ -953,6 +970,7 @@ static int __devexit ti_hecc_remove(struct platform_device *pdev)
953 struct net_device *ndev = platform_get_drvdata(pdev); 970 struct net_device *ndev = platform_get_drvdata(pdev);
954 struct ti_hecc_priv *priv = netdev_priv(ndev); 971 struct ti_hecc_priv *priv = netdev_priv(ndev);
955 972
973 clk_disable(priv->clk);
956 clk_put(priv->clk); 974 clk_put(priv->clk);
957 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 975 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
958 iounmap(priv->base); 976 iounmap(priv->base);
@@ -964,6 +982,48 @@ static int __devexit ti_hecc_remove(struct platform_device *pdev)
964 return 0; 982 return 0;
965} 983}
966 984
985
986#ifdef CONFIG_PM
987static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
988{
989 struct net_device *dev = platform_get_drvdata(pdev);
990 struct ti_hecc_priv *priv = netdev_priv(dev);
991
992 if (netif_running(dev)) {
993 netif_stop_queue(dev);
994 netif_device_detach(dev);
995 }
996
997 hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
998 priv->can.state = CAN_STATE_SLEEPING;
999
1000 clk_disable(priv->clk);
1001
1002 return 0;
1003}
1004
1005static int ti_hecc_resume(struct platform_device *pdev)
1006{
1007 struct net_device *dev = platform_get_drvdata(pdev);
1008 struct ti_hecc_priv *priv = netdev_priv(dev);
1009
1010 clk_enable(priv->clk);
1011
1012 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
1013 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1014
1015 if (netif_running(dev)) {
1016 netif_device_attach(dev);
1017 netif_start_queue(dev);
1018 }
1019
1020 return 0;
1021}
1022#else
1023#define ti_hecc_suspend NULL
1024#define ti_hecc_resume NULL
1025#endif
1026
967/* TI HECC netdevice driver: platform driver structure */ 1027/* TI HECC netdevice driver: platform driver structure */
968static struct platform_driver ti_hecc_driver = { 1028static struct platform_driver ti_hecc_driver = {
969 .driver = { 1029 .driver = {
@@ -972,6 +1032,8 @@ static struct platform_driver ti_hecc_driver = {
972 }, 1032 },
973 .probe = ti_hecc_probe, 1033 .probe = ti_hecc_probe,
974 .remove = __devexit_p(ti_hecc_remove), 1034 .remove = __devexit_p(ti_hecc_remove),
1035 .suspend = ti_hecc_suspend,
1036 .resume = ti_hecc_resume,
975}; 1037};
976 1038
977static int __init ti_hecc_init_driver(void) 1039static int __init ti_hecc_init_driver(void)
@@ -979,14 +1041,15 @@ static int __init ti_hecc_init_driver(void)
979 printk(KERN_INFO DRV_DESC "\n"); 1041 printk(KERN_INFO DRV_DESC "\n");
980 return platform_driver_register(&ti_hecc_driver); 1042 return platform_driver_register(&ti_hecc_driver);
981} 1043}
982module_init(ti_hecc_init_driver);
983 1044
984static void __exit ti_hecc_exit_driver(void) 1045static void __exit ti_hecc_exit_driver(void)
985{ 1046{
986 printk(KERN_INFO DRV_DESC " unloaded\n"); 1047 printk(KERN_INFO DRV_DESC " unloaded\n");
987 platform_driver_unregister(&ti_hecc_driver); 1048 platform_driver_unregister(&ti_hecc_driver);
988} 1049}
1050
989module_exit(ti_hecc_exit_driver); 1051module_exit(ti_hecc_exit_driver);
1052module_init(ti_hecc_init_driver);
990 1053
991MODULE_AUTHOR("Anant Gole <anantgole@ti.com>"); 1054MODULE_AUTHOR("Anant Gole <anantgole@ti.com>");
992MODULE_LICENSE("GPL v2"); 1055MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index bbc78e0b8a15..97ff6febad63 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -5,6 +5,6 @@ config CAN_EMS_USB
5 tristate "EMS CPC-USB/ARM7 CAN/USB interface" 5 tristate "EMS CPC-USB/ARM7 CAN/USB interface"
6 ---help--- 6 ---help---
7 This driver is for the one channel CPC-USB/ARM7 CAN/USB interface 7 This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
8 from from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de). 8 from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
9 9
10endmenu 10endmenu
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index efbb05c71bf4..11c87840cc00 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -767,6 +767,9 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
767 size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN 767 size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN
768 + sizeof(struct cpc_can_msg); 768 + sizeof(struct cpc_can_msg);
769 769
770 if (can_dropped_invalid_skb(netdev, skb))
771 return NETDEV_TX_OK;
772
770 /* create a URB, and a buffer for it, and copy the data to the URB */ 773 /* create a URB, and a buffer for it, and copy the data to the URB */
771 urb = usb_alloc_urb(0, GFP_ATOMIC); 774 urb = usb_alloc_urb(0, GFP_ATOMIC);
772 if (!urb) { 775 if (!urb) {
@@ -1019,8 +1022,7 @@ static int ems_usb_probe(struct usb_interface *intf,
1019 dev->can.bittiming_const = &ems_usb_bittiming_const; 1022 dev->can.bittiming_const = &ems_usb_bittiming_const;
1020 dev->can.do_set_bittiming = ems_usb_set_bittiming; 1023 dev->can.do_set_bittiming = ems_usb_set_bittiming;
1021 dev->can.do_set_mode = ems_usb_set_mode; 1024 dev->can.do_set_mode = ems_usb_set_mode;
1022 1025 dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
1023 netdev->flags |= IFF_ECHO; /* we support local echo */
1024 1026
1025 netdev->netdev_ops = &ems_usb_netdev_ops; 1027 netdev->netdev_ops = &ems_usb_netdev_ops;
1026 1028
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 80ac56313981..d124d837ae58 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -47,6 +47,7 @@
47#include <linux/if_arp.h> 47#include <linux/if_arp.h>
48#include <linux/if_ether.h> 48#include <linux/if_ether.h>
49#include <linux/can.h> 49#include <linux/can.h>
50#include <linux/can/dev.h>
50#include <net/rtnetlink.h> 51#include <net/rtnetlink.h>
51 52
52static __initdata const char banner[] = 53static __initdata const char banner[] =
@@ -70,10 +71,11 @@ MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
70 71
71static void vcan_rx(struct sk_buff *skb, struct net_device *dev) 72static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
72{ 73{
74 struct can_frame *cf = (struct can_frame *)skb->data;
73 struct net_device_stats *stats = &dev->stats; 75 struct net_device_stats *stats = &dev->stats;
74 76
75 stats->rx_packets++; 77 stats->rx_packets++;
76 stats->rx_bytes += skb->len; 78 stats->rx_bytes += cf->can_dlc;
77 79
78 skb->protocol = htons(ETH_P_CAN); 80 skb->protocol = htons(ETH_P_CAN);
79 skb->pkt_type = PACKET_BROADCAST; 81 skb->pkt_type = PACKET_BROADCAST;
@@ -85,11 +87,15 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
85 87
86static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev) 88static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
87{ 89{
90 struct can_frame *cf = (struct can_frame *)skb->data;
88 struct net_device_stats *stats = &dev->stats; 91 struct net_device_stats *stats = &dev->stats;
89 int loop; 92 int loop;
90 93
94 if (can_dropped_invalid_skb(dev, skb))
95 return NETDEV_TX_OK;
96
91 stats->tx_packets++; 97 stats->tx_packets++;
92 stats->tx_bytes += skb->len; 98 stats->tx_bytes += cf->can_dlc;
93 99
94 /* set flag whether this packet has to be looped back */ 100 /* set flag whether this packet has to be looped back */
95 loop = skb->pkt_type == PACKET_LOOPBACK; 101 loop = skb->pkt_type == PACKET_LOOPBACK;
@@ -103,7 +109,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
103 * CAN core already did the echo for us 109 * CAN core already did the echo for us
104 */ 110 */
105 stats->rx_packets++; 111 stats->rx_packets++;
106 stats->rx_bytes += skb->len; 112 stats->rx_bytes += cf->can_dlc;
107 } 113 }
108 kfree_skb(skb); 114 kfree_skb(skb);
109 return NETDEV_TX_OK; 115 return NETDEV_TX_OK;
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index f857afe8e488..7cbcfb0ade1c 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -66,6 +66,7 @@
66 * by default, the selective clear mask is set up to process rx packets. 66 * by default, the selective clear mask is set up to process rx packets.
67 */ 67 */
68 68
69#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69 70
70#include <linux/module.h> 71#include <linux/module.h>
71#include <linux/kernel.h> 72#include <linux/kernel.h>
@@ -106,7 +107,7 @@
106#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ) 107#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
107#define CAS_NCPUS num_online_cpus() 108#define CAS_NCPUS num_online_cpus()
108 109
109#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL) 110#ifdef CONFIG_CASSINI_NAPI
110#define USE_NAPI 111#define USE_NAPI
111#define cas_skb_release(x) netif_receive_skb(x) 112#define cas_skb_release(x) netif_receive_skb(x)
112#else 113#else
@@ -143,7 +144,6 @@
143#undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */ 144#undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */
144 145
145#define DRV_MODULE_NAME "cassini" 146#define DRV_MODULE_NAME "cassini"
146#define PFX DRV_MODULE_NAME ": "
147#define DRV_MODULE_VERSION "1.6" 147#define DRV_MODULE_VERSION "1.6"
148#define DRV_MODULE_RELDATE "21 May 2008" 148#define DRV_MODULE_RELDATE "21 May 2008"
149 149
@@ -236,7 +236,7 @@ static u16 link_modes[] __devinitdata = {
236 CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */ 236 CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
237}; 237};
238 238
239static struct pci_device_id cas_pci_tbl[] __devinitdata = { 239static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
240 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI, 240 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
241 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 241 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242 { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN, 242 { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
@@ -649,9 +649,8 @@ static cas_page_t *cas_page_dequeue(struct cas *cp)
649 cas_spare_recover(cp, GFP_ATOMIC); 649 cas_spare_recover(cp, GFP_ATOMIC);
650 spin_lock(&cp->rx_spare_lock); 650 spin_lock(&cp->rx_spare_lock);
651 if (list_empty(&cp->rx_spare_list)) { 651 if (list_empty(&cp->rx_spare_list)) {
652 if (netif_msg_rx_err(cp)) 652 netif_err(cp, rx_err, cp->dev,
653 printk(KERN_ERR "%s: no spare buffers " 653 "no spare buffers available\n");
654 "available.\n", cp->dev->name);
655 spin_unlock(&cp->rx_spare_lock); 654 spin_unlock(&cp->rx_spare_lock);
656 return NULL; 655 return NULL;
657 } 656 }
@@ -728,12 +727,10 @@ static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
728#endif 727#endif
729start_aneg: 728start_aneg:
730 if (cp->lstate == link_up) { 729 if (cp->lstate == link_up) {
731 printk(KERN_INFO "%s: PCS link down.\n", 730 netdev_info(cp->dev, "PCS link down\n");
732 cp->dev->name);
733 } else { 731 } else {
734 if (changed) { 732 if (changed) {
735 printk(KERN_INFO "%s: link configuration changed\n", 733 netdev_info(cp->dev, "link configuration changed\n");
736 cp->dev->name);
737 } 734 }
738 } 735 }
739 cp->lstate = link_down; 736 cp->lstate = link_down;
@@ -826,12 +823,12 @@ static int cas_saturn_firmware_init(struct cas *cp)
826 823
827 err = request_firmware(&fw, fw_name, &cp->pdev->dev); 824 err = request_firmware(&fw, fw_name, &cp->pdev->dev);
828 if (err) { 825 if (err) {
829 printk(KERN_ERR "cassini: Failed to load firmware \"%s\"\n", 826 pr_err("Failed to load firmware \"%s\"\n",
830 fw_name); 827 fw_name);
831 return err; 828 return err;
832 } 829 }
833 if (fw->size < 2) { 830 if (fw->size < 2) {
834 printk(KERN_ERR "cassini: bogus length %zu in \"%s\"\n", 831 pr_err("bogus length %zu in \"%s\"\n",
835 fw->size, fw_name); 832 fw->size, fw_name);
836 err = -EINVAL; 833 err = -EINVAL;
837 goto out; 834 goto out;
@@ -841,7 +838,7 @@ static int cas_saturn_firmware_init(struct cas *cp)
841 cp->fw_data = vmalloc(cp->fw_size); 838 cp->fw_data = vmalloc(cp->fw_size);
842 if (!cp->fw_data) { 839 if (!cp->fw_data) {
843 err = -ENOMEM; 840 err = -ENOMEM;
844 printk(KERN_ERR "cassini: \"%s\" Failed %d\n", fw_name, err); 841 pr_err("\"%s\" Failed %d\n", fw_name, err);
845 goto out; 842 goto out;
846 } 843 }
847 memcpy(cp->fw_data, &fw->data[2], cp->fw_size); 844 memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
@@ -986,9 +983,8 @@ static void cas_phy_init(struct cas *cp)
986 break; 983 break;
987 } 984 }
988 if (limit <= 0) 985 if (limit <= 0)
989 printk(KERN_WARNING "%s: PCS reset bit would not " 986 netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
990 "clear [%08x].\n", cp->dev->name, 987 readl(cp->regs + REG_PCS_STATE_MACHINE));
991 readl(cp->regs + REG_PCS_STATE_MACHINE));
992 988
993 /* Make sure PCS is disabled while changing advertisement 989 /* Make sure PCS is disabled while changing advertisement
994 * configuration. 990 * configuration.
@@ -1030,11 +1026,8 @@ static int cas_pcs_link_check(struct cas *cp)
1030 */ 1026 */
1031 if ((stat & (PCS_MII_STATUS_AUTONEG_COMP | 1027 if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
1032 PCS_MII_STATUS_REMOTE_FAULT)) == 1028 PCS_MII_STATUS_REMOTE_FAULT)) ==
1033 (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) { 1029 (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
1034 if (netif_msg_link(cp)) 1030 netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
1035 printk(KERN_INFO "%s: PCS RemoteFault\n",
1036 cp->dev->name);
1037 }
1038 1031
1039 /* work around link detection issue by querying the PCS state 1032 /* work around link detection issue by querying the PCS state
1040 * machine directly. 1033 * machine directly.
@@ -1081,10 +1074,8 @@ static int cas_pcs_link_check(struct cas *cp)
1081 cp->link_transition = LINK_TRANSITION_ON_FAILURE; 1074 cp->link_transition = LINK_TRANSITION_ON_FAILURE;
1082 } 1075 }
1083 netif_carrier_off(cp->dev); 1076 netif_carrier_off(cp->dev);
1084 if (cp->opened && netif_msg_link(cp)) { 1077 if (cp->opened)
1085 printk(KERN_INFO "%s: PCS link down.\n", 1078 netif_info(cp, link, cp->dev, "PCS link down\n");
1086 cp->dev->name);
1087 }
1088 1079
1089 /* Cassini only: if you force a mode, there can be 1080 /* Cassini only: if you force a mode, there can be
1090 * sync problems on link down. to fix that, the following 1081 * sync problems on link down. to fix that, the following
@@ -1139,9 +1130,8 @@ static int cas_txmac_interrupt(struct net_device *dev,
1139 if (!txmac_stat) 1130 if (!txmac_stat)
1140 return 0; 1131 return 0;
1141 1132
1142 if (netif_msg_intr(cp)) 1133 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1143 printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n", 1134 "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
1144 cp->dev->name, txmac_stat);
1145 1135
1146 /* Defer timer expiration is quite normal, 1136 /* Defer timer expiration is quite normal,
1147 * don't even log the event. 1137 * don't even log the event.
@@ -1152,14 +1142,12 @@ static int cas_txmac_interrupt(struct net_device *dev,
1152 1142
1153 spin_lock(&cp->stat_lock[0]); 1143 spin_lock(&cp->stat_lock[0]);
1154 if (txmac_stat & MAC_TX_UNDERRUN) { 1144 if (txmac_stat & MAC_TX_UNDERRUN) {
1155 printk(KERN_ERR "%s: TX MAC xmit underrun.\n", 1145 netdev_err(dev, "TX MAC xmit underrun\n");
1156 dev->name);
1157 cp->net_stats[0].tx_fifo_errors++; 1146 cp->net_stats[0].tx_fifo_errors++;
1158 } 1147 }
1159 1148
1160 if (txmac_stat & MAC_TX_MAX_PACKET_ERR) { 1149 if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
1161 printk(KERN_ERR "%s: TX MAC max packet size error.\n", 1150 netdev_err(dev, "TX MAC max packet size error\n");
1162 dev->name);
1163 cp->net_stats[0].tx_errors++; 1151 cp->net_stats[0].tx_errors++;
1164 } 1152 }
1165 1153
@@ -1487,8 +1475,7 @@ static int cas_rxmac_reset(struct cas *cp)
1487 udelay(10); 1475 udelay(10);
1488 } 1476 }
1489 if (limit == STOP_TRIES) { 1477 if (limit == STOP_TRIES) {
1490 printk(KERN_ERR "%s: RX MAC will not disable, resetting whole " 1478 netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
1491 "chip.\n", dev->name);
1492 return 1; 1479 return 1;
1493 } 1480 }
1494 1481
@@ -1500,8 +1487,7 @@ static int cas_rxmac_reset(struct cas *cp)
1500 udelay(10); 1487 udelay(10);
1501 } 1488 }
1502 if (limit == STOP_TRIES) { 1489 if (limit == STOP_TRIES) {
1503 printk(KERN_ERR "%s: RX DMA will not disable, resetting whole " 1490 netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
1504 "chip.\n", dev->name);
1505 return 1; 1491 return 1;
1506 } 1492 }
1507 1493
@@ -1515,8 +1501,7 @@ static int cas_rxmac_reset(struct cas *cp)
1515 udelay(10); 1501 udelay(10);
1516 } 1502 }
1517 if (limit == STOP_TRIES) { 1503 if (limit == STOP_TRIES) {
1518 printk(KERN_ERR "%s: RX reset command will not execute, " 1504 netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
1519 "resetting whole chip.\n", dev->name);
1520 return 1; 1505 return 1;
1521 } 1506 }
1522 1507
@@ -1545,9 +1530,7 @@ static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
1545 if (!stat) 1530 if (!stat)
1546 return 0; 1531 return 0;
1547 1532
1548 if (netif_msg_intr(cp)) 1533 netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
1549 printk(KERN_DEBUG "%s: rxmac interrupt, stat: 0x%x\n",
1550 cp->dev->name, stat);
1551 1534
1552 /* these are all rollovers */ 1535 /* these are all rollovers */
1553 spin_lock(&cp->stat_lock[0]); 1536 spin_lock(&cp->stat_lock[0]);
@@ -1580,9 +1563,8 @@ static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
1580 if (!stat) 1563 if (!stat)
1581 return 0; 1564 return 0;
1582 1565
1583 if (netif_msg_intr(cp)) 1566 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1584 printk(KERN_DEBUG "%s: mac interrupt, stat: 0x%x\n", 1567 "mac interrupt, stat: 0x%x\n", stat);
1585 cp->dev->name, stat);
1586 1568
1587 /* This interrupt is just for pause frame and pause 1569 /* This interrupt is just for pause frame and pause
1588 * tracking. It is useful for diagnostics and debug 1570 * tracking. It is useful for diagnostics and debug
@@ -1605,9 +1587,7 @@ static inline int cas_mdio_link_not_up(struct cas *cp)
1605 1587
1606 switch (cp->lstate) { 1588 switch (cp->lstate) {
1607 case link_force_ret: 1589 case link_force_ret:
1608 if (netif_msg_link(cp)) 1590 netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
1609 printk(KERN_INFO "%s: Autoneg failed again, keeping"
1610 " forced mode\n", cp->dev->name);
1611 cas_phy_write(cp, MII_BMCR, cp->link_fcntl); 1591 cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
1612 cp->timer_ticks = 5; 1592 cp->timer_ticks = 5;
1613 cp->lstate = link_force_ok; 1593 cp->lstate = link_force_ok;
@@ -1675,9 +1655,9 @@ static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
1675 cas_mif_poll(cp, 0); 1655 cas_mif_poll(cp, 0);
1676 cp->link_fcntl = cas_phy_read(cp, MII_BMCR); 1656 cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
1677 cp->timer_ticks = 5; 1657 cp->timer_ticks = 5;
1678 if (cp->opened && netif_msg_link(cp)) 1658 if (cp->opened)
1679 printk(KERN_INFO "%s: Got link after fallback, retrying" 1659 netif_info(cp, link, cp->dev,
1680 " autoneg once...\n", cp->dev->name); 1660 "Got link after fallback, retrying autoneg once...\n");
1681 cas_phy_write(cp, MII_BMCR, 1661 cas_phy_write(cp, MII_BMCR,
1682 cp->link_fcntl | BMCR_ANENABLE | 1662 cp->link_fcntl | BMCR_ANENABLE |
1683 BMCR_ANRESTART); 1663 BMCR_ANRESTART);
@@ -1704,9 +1684,8 @@ static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
1704 cp->link_transition = LINK_TRANSITION_LINK_DOWN; 1684 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
1705 1685
1706 netif_carrier_off(cp->dev); 1686 netif_carrier_off(cp->dev);
1707 if (cp->opened && netif_msg_link(cp)) 1687 if (cp->opened)
1708 printk(KERN_INFO "%s: Link down\n", 1688 netif_info(cp, link, cp->dev, "Link down\n");
1709 cp->dev->name);
1710 restart = 1; 1689 restart = 1;
1711 1690
1712 } else if (++cp->timer_ticks > 10) 1691 } else if (++cp->timer_ticks > 10)
@@ -1737,23 +1716,23 @@ static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
1737 if (!stat) 1716 if (!stat)
1738 return 0; 1717 return 0;
1739 1718
1740 printk(KERN_ERR "%s: PCI error [%04x:%04x] ", dev->name, stat, 1719 netdev_err(dev, "PCI error [%04x:%04x]",
1741 readl(cp->regs + REG_BIM_DIAG)); 1720 stat, readl(cp->regs + REG_BIM_DIAG));
1742 1721
1743 /* cassini+ has this reserved */ 1722 /* cassini+ has this reserved */
1744 if ((stat & PCI_ERR_BADACK) && 1723 if ((stat & PCI_ERR_BADACK) &&
1745 ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0)) 1724 ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
1746 printk("<No ACK64# during ABS64 cycle> "); 1725 pr_cont(" <No ACK64# during ABS64 cycle>");
1747 1726
1748 if (stat & PCI_ERR_DTRTO) 1727 if (stat & PCI_ERR_DTRTO)
1749 printk("<Delayed transaction timeout> "); 1728 pr_cont(" <Delayed transaction timeout>");
1750 if (stat & PCI_ERR_OTHER) 1729 if (stat & PCI_ERR_OTHER)
1751 printk("<other> "); 1730 pr_cont(" <other>");
1752 if (stat & PCI_ERR_BIM_DMA_WRITE) 1731 if (stat & PCI_ERR_BIM_DMA_WRITE)
1753 printk("<BIM DMA 0 write req> "); 1732 pr_cont(" <BIM DMA 0 write req>");
1754 if (stat & PCI_ERR_BIM_DMA_READ) 1733 if (stat & PCI_ERR_BIM_DMA_READ)
1755 printk("<BIM DMA 0 read req> "); 1734 pr_cont(" <BIM DMA 0 read req>");
1756 printk("\n"); 1735 pr_cont("\n");
1757 1736
1758 if (stat & PCI_ERR_OTHER) { 1737 if (stat & PCI_ERR_OTHER) {
1759 u16 cfg; 1738 u16 cfg;
@@ -1762,25 +1741,19 @@ static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
1762 * true cause. 1741 * true cause.
1763 */ 1742 */
1764 pci_read_config_word(cp->pdev, PCI_STATUS, &cfg); 1743 pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
1765 printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n", 1744 netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
1766 dev->name, cfg);
1767 if (cfg & PCI_STATUS_PARITY) 1745 if (cfg & PCI_STATUS_PARITY)
1768 printk(KERN_ERR "%s: PCI parity error detected.\n", 1746 netdev_err(dev, "PCI parity error detected\n");
1769 dev->name);
1770 if (cfg & PCI_STATUS_SIG_TARGET_ABORT) 1747 if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
1771 printk(KERN_ERR "%s: PCI target abort.\n", 1748 netdev_err(dev, "PCI target abort\n");
1772 dev->name);
1773 if (cfg & PCI_STATUS_REC_TARGET_ABORT) 1749 if (cfg & PCI_STATUS_REC_TARGET_ABORT)
1774 printk(KERN_ERR "%s: PCI master acks target abort.\n", 1750 netdev_err(dev, "PCI master acks target abort\n");
1775 dev->name);
1776 if (cfg & PCI_STATUS_REC_MASTER_ABORT) 1751 if (cfg & PCI_STATUS_REC_MASTER_ABORT)
1777 printk(KERN_ERR "%s: PCI master abort.\n", dev->name); 1752 netdev_err(dev, "PCI master abort\n");
1778 if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR) 1753 if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
1779 printk(KERN_ERR "%s: PCI system error SERR#.\n", 1754 netdev_err(dev, "PCI system error SERR#\n");
1780 dev->name);
1781 if (cfg & PCI_STATUS_DETECTED_PARITY) 1755 if (cfg & PCI_STATUS_DETECTED_PARITY)
1782 printk(KERN_ERR "%s: PCI parity error.\n", 1756 netdev_err(dev, "PCI parity error\n");
1783 dev->name);
1784 1757
1785 /* Write the error bits back to clear them. */ 1758 /* Write the error bits back to clear them. */
1786 cfg &= (PCI_STATUS_PARITY | 1759 cfg &= (PCI_STATUS_PARITY |
@@ -1806,9 +1779,8 @@ static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
1806{ 1779{
1807 if (status & INTR_RX_TAG_ERROR) { 1780 if (status & INTR_RX_TAG_ERROR) {
1808 /* corrupt RX tag framing */ 1781 /* corrupt RX tag framing */
1809 if (netif_msg_rx_err(cp)) 1782 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1810 printk(KERN_DEBUG "%s: corrupt rx tag framing\n", 1783 "corrupt rx tag framing\n");
1811 cp->dev->name);
1812 spin_lock(&cp->stat_lock[0]); 1784 spin_lock(&cp->stat_lock[0]);
1813 cp->net_stats[0].rx_errors++; 1785 cp->net_stats[0].rx_errors++;
1814 spin_unlock(&cp->stat_lock[0]); 1786 spin_unlock(&cp->stat_lock[0]);
@@ -1817,9 +1789,8 @@ static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
1817 1789
1818 if (status & INTR_RX_LEN_MISMATCH) { 1790 if (status & INTR_RX_LEN_MISMATCH) {
1819 /* length mismatch. */ 1791 /* length mismatch. */
1820 if (netif_msg_rx_err(cp)) 1792 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1821 printk(KERN_DEBUG "%s: length mismatch for rx frame\n", 1793 "length mismatch for rx frame\n");
1822 cp->dev->name);
1823 spin_lock(&cp->stat_lock[0]); 1794 spin_lock(&cp->stat_lock[0]);
1824 cp->net_stats[0].rx_errors++; 1795 cp->net_stats[0].rx_errors++;
1825 spin_unlock(&cp->stat_lock[0]); 1796 spin_unlock(&cp->stat_lock[0]);
@@ -1861,12 +1832,11 @@ do_reset:
1861#if 1 1832#if 1
1862 atomic_inc(&cp->reset_task_pending); 1833 atomic_inc(&cp->reset_task_pending);
1863 atomic_inc(&cp->reset_task_pending_all); 1834 atomic_inc(&cp->reset_task_pending_all);
1864 printk(KERN_ERR "%s:reset called in cas_abnormal_irq [0x%x]\n", 1835 netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
1865 dev->name, status);
1866 schedule_work(&cp->reset_task); 1836 schedule_work(&cp->reset_task);
1867#else 1837#else
1868 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); 1838 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
1869 printk(KERN_ERR "reset called in cas_abnormal_irq\n"); 1839 netdev_err(dev, "reset called in cas_abnormal_irq\n");
1870 schedule_work(&cp->reset_task); 1840 schedule_work(&cp->reset_task);
1871#endif 1841#endif
1872 return 1; 1842 return 1;
@@ -1920,9 +1890,8 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1920 if (count < 0) 1890 if (count < 0)
1921 break; 1891 break;
1922 1892
1923 if (netif_msg_tx_done(cp)) 1893 netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
1924 printk(KERN_DEBUG "%s: tx[%d] done, slot %d\n", 1894 "tx[%d] done, slot %d\n", ring, entry);
1925 cp->dev->name, ring, entry);
1926 1895
1927 skbs[entry] = NULL; 1896 skbs[entry] = NULL;
1928 cp->tx_tiny_use[ring][entry].nbufs = 0; 1897 cp->tx_tiny_use[ring][entry].nbufs = 0;
@@ -1969,9 +1938,9 @@ static void cas_tx(struct net_device *dev, struct cas *cp,
1969#ifdef USE_TX_COMPWB 1938#ifdef USE_TX_COMPWB
1970 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb); 1939 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
1971#endif 1940#endif
1972 if (netif_msg_intr(cp)) 1941 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1973 printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n", 1942 "tx interrupt, status: 0x%x, %llx\n",
1974 cp->dev->name, status, (unsigned long long)compwb); 1943 status, (unsigned long long)compwb);
1975 /* process all the rings */ 1944 /* process all the rings */
1976 for (ring = 0; ring < N_TX_RINGS; ring++) { 1945 for (ring = 0; ring < N_TX_RINGS; ring++) {
1977#ifdef USE_TX_COMPWB 1946#ifdef USE_TX_COMPWB
@@ -2050,10 +2019,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
2050 2019
2051 hlen = min(cp->page_size - off, dlen); 2020 hlen = min(cp->page_size - off, dlen);
2052 if (hlen < 0) { 2021 if (hlen < 0) {
2053 if (netif_msg_rx_err(cp)) { 2022 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2054 printk(KERN_DEBUG "%s: rx page overflow: " 2023 "rx page overflow: %d\n", hlen);
2055 "%d\n", cp->dev->name, hlen);
2056 }
2057 dev_kfree_skb_irq(skb); 2024 dev_kfree_skb_irq(skb);
2058 return -1; 2025 return -1;
2059 } 2026 }
@@ -2130,10 +2097,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
2130 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel; 2097 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2131 hlen = min(cp->page_size - off, dlen); 2098 hlen = min(cp->page_size - off, dlen);
2132 if (hlen < 0) { 2099 if (hlen < 0) {
2133 if (netif_msg_rx_err(cp)) { 2100 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2134 printk(KERN_DEBUG "%s: rx page overflow: " 2101 "rx page overflow: %d\n", hlen);
2135 "%d\n", cp->dev->name, hlen);
2136 }
2137 dev_kfree_skb_irq(skb); 2102 dev_kfree_skb_irq(skb);
2138 return -1; 2103 return -1;
2139 } 2104 }
@@ -2265,9 +2230,8 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2265 2230
2266 entry = cp->rx_old[ring]; 2231 entry = cp->rx_old[ring];
2267 2232
2268 if (netif_msg_intr(cp)) 2233 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2269 printk(KERN_DEBUG "%s: rxd[%d] interrupt, done: %d\n", 2234 "rxd[%d] interrupt, done: %d\n", ring, entry);
2270 cp->dev->name, ring, entry);
2271 2235
2272 cluster = -1; 2236 cluster = -1;
2273 count = entry & 0x3; 2237 count = entry & 0x3;
@@ -2337,11 +2301,10 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2337 int entry, drops; 2301 int entry, drops;
2338 int npackets = 0; 2302 int npackets = 0;
2339 2303
2340 if (netif_msg_intr(cp)) 2304 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2341 printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n", 2305 "rx[%d] interrupt, done: %d/%d\n",
2342 cp->dev->name, ring, 2306 ring,
2343 readl(cp->regs + REG_RX_COMP_HEAD), 2307 readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
2344 cp->rx_new[ring]);
2345 2308
2346 entry = cp->rx_new[ring]; 2309 entry = cp->rx_new[ring];
2347 drops = 0; 2310 drops = 0;
@@ -2442,8 +2405,7 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2442 cp->rx_new[ring] = entry; 2405 cp->rx_new[ring] = entry;
2443 2406
2444 if (drops) 2407 if (drops)
2445 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", 2408 netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
2446 cp->dev->name);
2447 return npackets; 2409 return npackets;
2448} 2410}
2449 2411
@@ -2457,10 +2419,9 @@ static void cas_post_rxcs_ringN(struct net_device *dev,
2457 2419
2458 last = cp->rx_cur[ring]; 2420 last = cp->rx_cur[ring];
2459 entry = cp->rx_new[ring]; 2421 entry = cp->rx_new[ring];
2460 if (netif_msg_intr(cp)) 2422 netif_printk(cp, intr, KERN_DEBUG, dev,
2461 printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n", 2423 "rxc[%d] interrupt, done: %d/%d\n",
2462 dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD), 2424 ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
2463 entry);
2464 2425
2465 /* zero and re-mark descriptors */ 2426 /* zero and re-mark descriptors */
2466 while (last != entry) { 2427 while (last != entry) {
@@ -2729,42 +2690,38 @@ static void cas_tx_timeout(struct net_device *dev)
2729{ 2690{
2730 struct cas *cp = netdev_priv(dev); 2691 struct cas *cp = netdev_priv(dev);
2731 2692
2732 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); 2693 netdev_err(dev, "transmit timed out, resetting\n");
2733 if (!cp->hw_running) { 2694 if (!cp->hw_running) {
2734 printk("%s: hrm.. hw not running!\n", dev->name); 2695 netdev_err(dev, "hrm.. hw not running!\n");
2735 return; 2696 return;
2736 } 2697 }
2737 2698
2738 printk(KERN_ERR "%s: MIF_STATE[%08x]\n", 2699 netdev_err(dev, "MIF_STATE[%08x]\n",
2739 dev->name, readl(cp->regs + REG_MIF_STATE_MACHINE)); 2700 readl(cp->regs + REG_MIF_STATE_MACHINE));
2740 2701
2741 printk(KERN_ERR "%s: MAC_STATE[%08x]\n", 2702 netdev_err(dev, "MAC_STATE[%08x]\n",
2742 dev->name, readl(cp->regs + REG_MAC_STATE_MACHINE)); 2703 readl(cp->regs + REG_MAC_STATE_MACHINE));
2743 2704
2744 printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x] " 2705 netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
2745 "FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n", 2706 readl(cp->regs + REG_TX_CFG),
2746 dev->name, 2707 readl(cp->regs + REG_MAC_TX_STATUS),
2747 readl(cp->regs + REG_TX_CFG), 2708 readl(cp->regs + REG_MAC_TX_CFG),
2748 readl(cp->regs + REG_MAC_TX_STATUS), 2709 readl(cp->regs + REG_TX_FIFO_PKT_CNT),
2749 readl(cp->regs + REG_MAC_TX_CFG), 2710 readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
2750 readl(cp->regs + REG_TX_FIFO_PKT_CNT), 2711 readl(cp->regs + REG_TX_FIFO_READ_PTR),
2751 readl(cp->regs + REG_TX_FIFO_WRITE_PTR), 2712 readl(cp->regs + REG_TX_SM_1),
2752 readl(cp->regs + REG_TX_FIFO_READ_PTR), 2713 readl(cp->regs + REG_TX_SM_2));
2753 readl(cp->regs + REG_TX_SM_1), 2714
2754 readl(cp->regs + REG_TX_SM_2)); 2715 netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
2755 2716 readl(cp->regs + REG_RX_CFG),
2756 printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n", 2717 readl(cp->regs + REG_MAC_RX_STATUS),
2757 dev->name, 2718 readl(cp->regs + REG_MAC_RX_CFG));
2758 readl(cp->regs + REG_RX_CFG), 2719
2759 readl(cp->regs + REG_MAC_RX_STATUS), 2720 netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
2760 readl(cp->regs + REG_MAC_RX_CFG)); 2721 readl(cp->regs + REG_HP_STATE_MACHINE),
2761 2722 readl(cp->regs + REG_HP_STATUS0),
2762 printk(KERN_ERR "%s: HP_STATE[%08x:%08x:%08x:%08x]\n", 2723 readl(cp->regs + REG_HP_STATUS1),
2763 dev->name, 2724 readl(cp->regs + REG_HP_STATUS2));
2764 readl(cp->regs + REG_HP_STATE_MACHINE),
2765 readl(cp->regs + REG_HP_STATUS0),
2766 readl(cp->regs + REG_HP_STATUS1),
2767 readl(cp->regs + REG_HP_STATUS2));
2768 2725
2769#if 1 2726#if 1
2770 atomic_inc(&cp->reset_task_pending); 2727 atomic_inc(&cp->reset_task_pending);
@@ -2830,8 +2787,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2830 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) { 2787 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
2831 netif_stop_queue(dev); 2788 netif_stop_queue(dev);
2832 spin_unlock_irqrestore(&cp->tx_lock[ring], flags); 2789 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2833 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " 2790 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
2834 "queue awake!\n", dev->name);
2835 return 1; 2791 return 1;
2836 } 2792 }
2837 2793
@@ -2908,11 +2864,9 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2908 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)) 2864 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2909 netif_stop_queue(dev); 2865 netif_stop_queue(dev);
2910 2866
2911 if (netif_msg_tx_queued(cp)) 2867 netif_printk(cp, tx_queued, KERN_DEBUG, dev,
2912 printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, " 2868 "tx[%d] queued, slot %d, skblen %d, avail %d\n",
2913 "avail %d\n", 2869 ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
2914 dev->name, ring, entry, skb->len,
2915 TX_BUFFS_AVAIL(cp, ring));
2916 writel(entry, cp->regs + REG_TX_KICKN(ring)); 2870 writel(entry, cp->regs + REG_TX_KICKN(ring));
2917 spin_unlock_irqrestore(&cp->tx_lock[ring], flags); 2871 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2918 return 0; 2872 return 0;
@@ -2999,6 +2953,40 @@ static inline void cas_init_dma(struct cas *cp)
2999 cas_init_rx_dma(cp); 2953 cas_init_rx_dma(cp);
3000} 2954}
3001 2955
2956static void cas_process_mc_list(struct cas *cp)
2957{
2958 u16 hash_table[16];
2959 u32 crc;
2960 struct dev_mc_list *dmi;
2961 int i = 1;
2962
2963 memset(hash_table, 0, sizeof(hash_table));
2964 netdev_for_each_mc_addr(dmi, cp->dev) {
2965 if (i <= CAS_MC_EXACT_MATCH_SIZE) {
2966 /* use the alternate mac address registers for the
2967 * first 15 multicast addresses
2968 */
2969 writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
2970 cp->regs + REG_MAC_ADDRN(i*3 + 0));
2971 writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
2972 cp->regs + REG_MAC_ADDRN(i*3 + 1));
2973 writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
2974 cp->regs + REG_MAC_ADDRN(i*3 + 2));
2975 i++;
2976 }
2977 else {
2978 /* use hw hash table for the next series of
2979 * multicast addresses
2980 */
2981 crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
2982 crc >>= 24;
2983 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
2984 }
2985 }
2986 for (i = 0; i < 16; i++)
2987 writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
2988}
2989
3002/* Must be invoked under cp->lock. */ 2990/* Must be invoked under cp->lock. */
3003static u32 cas_setup_multicast(struct cas *cp) 2991static u32 cas_setup_multicast(struct cas *cp)
3004{ 2992{
@@ -3014,43 +3002,7 @@ static u32 cas_setup_multicast(struct cas *cp)
3014 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; 3002 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
3015 3003
3016 } else { 3004 } else {
3017 u16 hash_table[16]; 3005 cas_process_mc_list(cp);
3018 u32 crc;
3019 struct dev_mc_list *dmi = cp->dev->mc_list;
3020 int i;
3021
3022 /* use the alternate mac address registers for the
3023 * first 15 multicast addresses
3024 */
3025 for (i = 1; i <= CAS_MC_EXACT_MATCH_SIZE; i++) {
3026 if (!dmi) {
3027 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 0));
3028 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 1));
3029 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2));
3030 continue;
3031 }
3032 writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
3033 cp->regs + REG_MAC_ADDRN(i*3 + 0));
3034 writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
3035 cp->regs + REG_MAC_ADDRN(i*3 + 1));
3036 writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
3037 cp->regs + REG_MAC_ADDRN(i*3 + 2));
3038 dmi = dmi->next;
3039 }
3040
3041 /* use hw hash table for the next series of
3042 * multicast addresses
3043 */
3044 memset(hash_table, 0, sizeof(hash_table));
3045 while (dmi) {
3046 crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
3047 crc >>= 24;
3048 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
3049 dmi = dmi->next;
3050 }
3051 for (i=0; i < 16; i++)
3052 writel(hash_table[i], cp->regs +
3053 REG_MAC_HASH_TABLEN(i));
3054 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; 3006 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
3055 } 3007 }
3056 3008
@@ -3100,10 +3052,10 @@ static void cas_mac_reset(struct cas *cp)
3100 3052
3101 if (readl(cp->regs + REG_MAC_TX_RESET) | 3053 if (readl(cp->regs + REG_MAC_TX_RESET) |
3102 readl(cp->regs + REG_MAC_RX_RESET)) 3054 readl(cp->regs + REG_MAC_RX_RESET))
3103 printk(KERN_ERR "%s: mac tx[%d]/rx[%d] reset failed [%08x]\n", 3055 netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
3104 cp->dev->name, readl(cp->regs + REG_MAC_TX_RESET), 3056 readl(cp->regs + REG_MAC_TX_RESET),
3105 readl(cp->regs + REG_MAC_RX_RESET), 3057 readl(cp->regs + REG_MAC_RX_RESET),
3106 readl(cp->regs + REG_MAC_STATE_MACHINE)); 3058 readl(cp->regs + REG_MAC_STATE_MACHINE));
3107} 3059}
3108 3060
3109 3061
@@ -3423,7 +3375,7 @@ use_random_mac_addr:
3423 goto done; 3375 goto done;
3424 3376
3425 /* Sun MAC prefix then 3 random bytes. */ 3377 /* Sun MAC prefix then 3 random bytes. */
3426 printk(PFX "MAC address not found in ROM VPD\n"); 3378 pr_info("MAC address not found in ROM VPD\n");
3427 dev_addr[0] = 0x08; 3379 dev_addr[0] = 0x08;
3428 dev_addr[1] = 0x00; 3380 dev_addr[1] = 0x00;
3429 dev_addr[2] = 0x20; 3381 dev_addr[2] = 0x20;
@@ -3484,7 +3436,7 @@ static int cas_check_invariants(struct cas *cp)
3484 __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT); 3436 __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
3485 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT; 3437 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
3486 } else { 3438 } else {
3487 printk(PFX "MTU limited to %d bytes\n", CAS_MAX_MTU); 3439 printk("MTU limited to %d bytes\n", CAS_MAX_MTU);
3488 } 3440 }
3489 } 3441 }
3490#endif 3442#endif
@@ -3529,7 +3481,7 @@ static int cas_check_invariants(struct cas *cp)
3529 } 3481 }
3530 } 3482 }
3531 } 3483 }
3532 printk(KERN_ERR PFX "MII phy did not respond [%08x]\n", 3484 pr_err("MII phy did not respond [%08x]\n",
3533 readl(cp->regs + REG_MIF_STATE_MACHINE)); 3485 readl(cp->regs + REG_MIF_STATE_MACHINE));
3534 return -1; 3486 return -1;
3535 3487
@@ -3574,21 +3526,19 @@ static inline void cas_start_dma(struct cas *cp)
3574 val = readl(cp->regs + REG_MAC_RX_CFG); 3526 val = readl(cp->regs + REG_MAC_RX_CFG);
3575 if ((val & MAC_RX_CFG_EN)) { 3527 if ((val & MAC_RX_CFG_EN)) {
3576 if (txfailed) { 3528 if (txfailed) {
3577 printk(KERN_ERR 3529 netdev_err(cp->dev,
3578 "%s: enabling mac failed [tx:%08x:%08x].\n", 3530 "enabling mac failed [tx:%08x:%08x]\n",
3579 cp->dev->name, 3531 readl(cp->regs + REG_MIF_STATE_MACHINE),
3580 readl(cp->regs + REG_MIF_STATE_MACHINE), 3532 readl(cp->regs + REG_MAC_STATE_MACHINE));
3581 readl(cp->regs + REG_MAC_STATE_MACHINE));
3582 } 3533 }
3583 goto enable_rx_done; 3534 goto enable_rx_done;
3584 } 3535 }
3585 udelay(10); 3536 udelay(10);
3586 } 3537 }
3587 printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n", 3538 netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
3588 cp->dev->name, 3539 (txfailed ? "tx,rx" : "rx"),
3589 (txfailed? "tx,rx":"rx"), 3540 readl(cp->regs + REG_MIF_STATE_MACHINE),
3590 readl(cp->regs + REG_MIF_STATE_MACHINE), 3541 readl(cp->regs + REG_MAC_STATE_MACHINE));
3591 readl(cp->regs + REG_MAC_STATE_MACHINE));
3592 3542
3593enable_rx_done: 3543enable_rx_done:
3594 cas_unmask_intr(cp); /* enable interrupts */ 3544 cas_unmask_intr(cp); /* enable interrupts */
@@ -3690,9 +3640,8 @@ static void cas_set_link_modes(struct cas *cp)
3690 } 3640 }
3691 } 3641 }
3692 3642
3693 if (netif_msg_link(cp)) 3643 netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
3694 printk(KERN_INFO "%s: Link up at %d Mbps, %s-duplex.\n", 3644 speed, full_duplex ? "full" : "half");
3695 cp->dev->name, speed, (full_duplex ? "full" : "half"));
3696 3645
3697 val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED; 3646 val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
3698 if (CAS_PHY_MII(cp->phy_type)) { 3647 if (CAS_PHY_MII(cp->phy_type)) {
@@ -3762,18 +3711,14 @@ static void cas_set_link_modes(struct cas *cp)
3762 3711
3763 if (netif_msg_link(cp)) { 3712 if (netif_msg_link(cp)) {
3764 if (pause & 0x01) { 3713 if (pause & 0x01) {
3765 printk(KERN_INFO "%s: Pause is enabled " 3714 netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
3766 "(rxfifo: %d off: %d on: %d)\n", 3715 cp->rx_fifo_size,
3767 cp->dev->name, 3716 cp->rx_pause_off,
3768 cp->rx_fifo_size, 3717 cp->rx_pause_on);
3769 cp->rx_pause_off,
3770 cp->rx_pause_on);
3771 } else if (pause & 0x10) { 3718 } else if (pause & 0x10) {
3772 printk(KERN_INFO "%s: TX pause enabled\n", 3719 netdev_info(cp->dev, "TX pause enabled\n");
3773 cp->dev->name);
3774 } else { 3720 } else {
3775 printk(KERN_INFO "%s: Pause is disabled\n", 3721 netdev_info(cp->dev, "Pause is disabled\n");
3776 cp->dev->name);
3777 } 3722 }
3778 } 3723 }
3779 3724
@@ -3849,7 +3794,7 @@ static void cas_global_reset(struct cas *cp, int blkflag)
3849 goto done; 3794 goto done;
3850 udelay(10); 3795 udelay(10);
3851 } 3796 }
3852 printk(KERN_ERR "%s: sw reset failed.\n", cp->dev->name); 3797 netdev_err(cp->dev, "sw reset failed\n");
3853 3798
3854done: 3799done:
3855 /* enable various BIM interrupts */ 3800 /* enable various BIM interrupts */
@@ -3955,7 +3900,7 @@ static int cas_change_mtu(struct net_device *dev, int new_mtu)
3955#else 3900#else
3956 atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ? 3901 atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
3957 CAS_RESET_ALL : CAS_RESET_MTU); 3902 CAS_RESET_ALL : CAS_RESET_MTU);
3958 printk(KERN_ERR "reset called in cas_change_mtu\n"); 3903 pr_err("reset called in cas_change_mtu\n");
3959 schedule_work(&cp->reset_task); 3904 schedule_work(&cp->reset_task);
3960#endif 3905#endif
3961 3906
@@ -4237,10 +4182,8 @@ static void cas_link_timer(unsigned long data)
4237 4182
4238 if (((tlm == 0x5) || (tlm == 0x3)) && 4183 if (((tlm == 0x5) || (tlm == 0x3)) &&
4239 (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) { 4184 (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
4240 if (netif_msg_tx_err(cp)) 4185 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4241 printk(KERN_DEBUG "%s: tx err: " 4186 "tx err: MAC_STATE[%08x]\n", val);
4242 "MAC_STATE[%08x]\n",
4243 cp->dev->name, val);
4244 reset = 1; 4187 reset = 1;
4245 goto done; 4188 goto done;
4246 } 4189 }
@@ -4249,10 +4192,9 @@ static void cas_link_timer(unsigned long data)
4249 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR); 4192 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
4250 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR); 4193 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
4251 if ((val == 0) && (wptr != rptr)) { 4194 if ((val == 0) && (wptr != rptr)) {
4252 if (netif_msg_tx_err(cp)) 4195 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4253 printk(KERN_DEBUG "%s: tx err: " 4196 "tx err: TX_FIFO[%08x:%08x:%08x]\n",
4254 "TX_FIFO[%08x:%08x:%08x]\n", 4197 val, wptr, rptr);
4255 cp->dev->name, val, wptr, rptr);
4256 reset = 1; 4198 reset = 1;
4257 } 4199 }
4258 4200
@@ -4268,7 +4210,7 @@ done:
4268 schedule_work(&cp->reset_task); 4210 schedule_work(&cp->reset_task);
4269#else 4211#else
4270 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); 4212 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
4271 printk(KERN_ERR "reset called in cas_link_timer\n"); 4213 pr_err("reset called in cas_link_timer\n");
4272 schedule_work(&cp->reset_task); 4214 schedule_work(&cp->reset_task);
4273#endif 4215#endif
4274 } 4216 }
@@ -4361,8 +4303,7 @@ static int cas_open(struct net_device *dev)
4361 */ 4303 */
4362 if (request_irq(cp->pdev->irq, cas_interrupt, 4304 if (request_irq(cp->pdev->irq, cas_interrupt,
4363 IRQF_SHARED, dev->name, (void *) dev)) { 4305 IRQF_SHARED, dev->name, (void *) dev)) {
4364 printk(KERN_ERR "%s: failed to request irq !\n", 4306 netdev_err(cp->dev, "failed to request irq !\n");
4365 cp->dev->name);
4366 err = -EAGAIN; 4307 err = -EAGAIN;
4367 goto err_spare; 4308 goto err_spare;
4368 } 4309 }
@@ -5002,24 +4943,24 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
5002 u8 orig_cacheline_size = 0, cas_cacheline_size = 0; 4943 u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
5003 4944
5004 if (cas_version_printed++ == 0) 4945 if (cas_version_printed++ == 0)
5005 printk(KERN_INFO "%s", version); 4946 pr_info("%s", version);
5006 4947
5007 err = pci_enable_device(pdev); 4948 err = pci_enable_device(pdev);
5008 if (err) { 4949 if (err) {
5009 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n"); 4950 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
5010 return err; 4951 return err;
5011 } 4952 }
5012 4953
5013 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 4954 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5014 dev_err(&pdev->dev, "Cannot find proper PCI device " 4955 dev_err(&pdev->dev, "Cannot find proper PCI device "
5015 "base address, aborting.\n"); 4956 "base address, aborting\n");
5016 err = -ENODEV; 4957 err = -ENODEV;
5017 goto err_out_disable_pdev; 4958 goto err_out_disable_pdev;
5018 } 4959 }
5019 4960
5020 dev = alloc_etherdev(sizeof(*cp)); 4961 dev = alloc_etherdev(sizeof(*cp));
5021 if (!dev) { 4962 if (!dev) {
5022 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n"); 4963 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
5023 err = -ENOMEM; 4964 err = -ENOMEM;
5024 goto err_out_disable_pdev; 4965 goto err_out_disable_pdev;
5025 } 4966 }
@@ -5027,7 +4968,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
5027 4968
5028 err = pci_request_regions(pdev, dev->name); 4969 err = pci_request_regions(pdev, dev->name);
5029 if (err) { 4970 if (err) {
5030 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n"); 4971 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
5031 goto err_out_free_netdev; 4972 goto err_out_free_netdev;
5032 } 4973 }
5033 pci_set_master(pdev); 4974 pci_set_master(pdev);
@@ -5041,8 +4982,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
5041 pci_cmd |= PCI_COMMAND_PARITY; 4982 pci_cmd |= PCI_COMMAND_PARITY;
5042 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); 4983 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
5043 if (pci_try_set_mwi(pdev)) 4984 if (pci_try_set_mwi(pdev))
5044 printk(KERN_WARNING PFX "Could not enable MWI for %s\n", 4985 pr_warning("Could not enable MWI for %s\n", pci_name(pdev));
5045 pci_name(pdev));
5046 4986
5047 cas_program_bridge(pdev); 4987 cas_program_bridge(pdev);
5048 4988
@@ -5085,7 +5025,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
5085 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 5025 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5086 if (err) { 5026 if (err) {
5087 dev_err(&pdev->dev, "No usable DMA configuration, " 5027 dev_err(&pdev->dev, "No usable DMA configuration, "
5088 "aborting.\n"); 5028 "aborting\n");
5089 goto err_out_free_res; 5029 goto err_out_free_res;
5090 } 5030 }
5091 pci_using_dac = 0; 5031 pci_using_dac = 0;
@@ -5144,7 +5084,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
5144 /* give us access to cassini registers */ 5084 /* give us access to cassini registers */
5145 cp->regs = pci_iomap(pdev, 0, casreg_len); 5085 cp->regs = pci_iomap(pdev, 0, casreg_len);
5146 if (!cp->regs) { 5086 if (!cp->regs) {
5147 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n"); 5087 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
5148 goto err_out_free_res; 5088 goto err_out_free_res;
5149 } 5089 }
5150 cp->casreg_len = casreg_len; 5090 cp->casreg_len = casreg_len;
@@ -5163,7 +5103,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
5163 pci_alloc_consistent(pdev, sizeof(struct cas_init_block), 5103 pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
5164 &cp->block_dvma); 5104 &cp->block_dvma);
5165 if (!cp->init_block) { 5105 if (!cp->init_block) {
5166 dev_err(&pdev->dev, "Cannot allocate init block, aborting.\n"); 5106 dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
5167 goto err_out_iounmap; 5107 goto err_out_iounmap;
5168 } 5108 }
5169 5109
@@ -5197,18 +5137,17 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
5197 dev->features |= NETIF_F_HIGHDMA; 5137 dev->features |= NETIF_F_HIGHDMA;
5198 5138
5199 if (register_netdev(dev)) { 5139 if (register_netdev(dev)) {
5200 dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); 5140 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
5201 goto err_out_free_consistent; 5141 goto err_out_free_consistent;
5202 } 5142 }
5203 5143
5204 i = readl(cp->regs + REG_BIM_CFG); 5144 i = readl(cp->regs + REG_BIM_CFG);
5205 printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) " 5145 netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
5206 "Ethernet[%d] %pM\n", dev->name, 5146 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5207 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "", 5147 (i & BIM_CFG_32BIT) ? "32" : "64",
5208 (i & BIM_CFG_32BIT) ? "32" : "64", 5148 (i & BIM_CFG_66MHZ) ? "66" : "33",
5209 (i & BIM_CFG_66MHZ) ? "66" : "33", 5149 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
5210 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq, 5150 dev->dev_addr);
5211 dev->dev_addr);
5212 5151
5213 pci_set_drvdata(pdev, dev); 5152 pci_set_drvdata(pdev, dev);
5214 cp->hw_running = 1; 5153 cp->hw_running = 1;
@@ -5322,7 +5261,7 @@ static int cas_resume(struct pci_dev *pdev)
5322 struct net_device *dev = pci_get_drvdata(pdev); 5261 struct net_device *dev = pci_get_drvdata(pdev);
5323 struct cas *cp = netdev_priv(dev); 5262 struct cas *cp = netdev_priv(dev);
5324 5263
5325 printk(KERN_INFO "%s: resuming\n", dev->name); 5264 netdev_info(dev, "resuming\n");
5326 5265
5327 mutex_lock(&cp->pm_mutex); 5266 mutex_lock(&cp->pm_mutex);
5328 cas_hard_reset(cp); 5267 cas_hard_reset(cp);
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 699d22c5fe09..2d11afe45310 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -36,6 +36,8 @@
36 * * 36 * *
37 ****************************************************************************/ 37 ****************************************************************************/
38 38
39#define pr_fmt(fmt) "cxgb: " fmt
40
39#ifndef _CXGB_COMMON_H_ 41#ifndef _CXGB_COMMON_H_
40#define _CXGB_COMMON_H_ 42#define _CXGB_COMMON_H_
41 43
@@ -55,28 +57,6 @@
55#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver" 57#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
56#define DRV_NAME "cxgb" 58#define DRV_NAME "cxgb"
57#define DRV_VERSION "2.2" 59#define DRV_VERSION "2.2"
58#define PFX DRV_NAME ": "
59
60#define CH_ERR(fmt, ...) printk(KERN_ERR PFX fmt, ## __VA_ARGS__)
61#define CH_WARN(fmt, ...) printk(KERN_WARNING PFX fmt, ## __VA_ARGS__)
62#define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__)
63
64/*
65 * More powerful macro that selectively prints messages based on msg_enable.
66 * For info and debugging messages.
67 */
68#define CH_MSG(adapter, level, category, fmt, ...) do { \
69 if ((adapter)->msg_enable & NETIF_MSG_##category) \
70 printk(KERN_##level PFX "%s: " fmt, (adapter)->name, \
71 ## __VA_ARGS__); \
72} while (0)
73
74#ifdef DEBUG
75# define CH_DBG(adapter, category, fmt, ...) \
76 CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
77#else
78# define CH_DBG(fmt, ...)
79#endif
80 60
81#define CH_DEVICE(devid, ssid, idx) \ 61#define CH_DEVICE(devid, ssid, idx) \
82 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx } 62 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
@@ -90,25 +70,13 @@
90typedef struct adapter adapter_t; 70typedef struct adapter adapter_t;
91 71
92struct t1_rx_mode { 72struct t1_rx_mode {
93 struct net_device *dev; 73 struct net_device *dev;
94 u32 idx;
95 struct dev_mc_list *list;
96}; 74};
97 75
98#define t1_rx_mode_promisc(rm) (rm->dev->flags & IFF_PROMISC) 76#define t1_rx_mode_promisc(rm) (rm->dev->flags & IFF_PROMISC)
99#define t1_rx_mode_allmulti(rm) (rm->dev->flags & IFF_ALLMULTI) 77#define t1_rx_mode_allmulti(rm) (rm->dev->flags & IFF_ALLMULTI)
100#define t1_rx_mode_mc_cnt(rm) (rm->dev->mc_count) 78#define t1_rx_mode_mc_cnt(rm) (netdev_mc_count(rm->dev))
101 79#define t1_get_netdev(rm) (rm->dev)
102static inline u8 *t1_get_next_mcaddr(struct t1_rx_mode *rm)
103{
104 u8 *addr = NULL;
105
106 if (rm->idx++ < rm->dev->mc_count) {
107 addr = rm->list->dmi_addr;
108 rm->list = rm->list->next;
109 }
110 return addr;
111}
112 80
113#define MAX_NPORTS 4 81#define MAX_NPORTS 4
114#define PORT_MASK ((1 << MAX_NPORTS) - 1) 82#define PORT_MASK ((1 << MAX_NPORTS) - 1)
@@ -334,7 +302,7 @@ static inline int t1_is_asic(const adapter_t *adapter)
334 return adapter->params.is_asic; 302 return adapter->params.is_asic;
335} 303}
336 304
337extern struct pci_device_id t1_pci_tbl[]; 305extern const struct pci_device_id t1_pci_tbl[];
338 306
339static inline int adapter_matches_type(const adapter_t *adapter, 307static inline int adapter_matches_type(const adapter_t *adapter,
340 int version, int revision) 308 int version, int revision)
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index 082cdb28b510..0f71304e0542 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -125,8 +125,6 @@ static void t1_set_rxmode(struct net_device *dev)
125 struct t1_rx_mode rm; 125 struct t1_rx_mode rm;
126 126
127 rm.dev = dev; 127 rm.dev = dev;
128 rm.idx = 0;
129 rm.list = dev->mc_list;
130 mac->ops->set_rx_mode(mac, &rm); 128 mac->ops->set_rx_mode(mac, &rm);
131} 129}
132 130
@@ -976,7 +974,7 @@ void t1_fatal_err(struct adapter *adapter)
976 t1_sge_stop(adapter->sge); 974 t1_sge_stop(adapter->sge);
977 t1_interrupts_disable(adapter); 975 t1_interrupts_disable(adapter);
978 } 976 }
979 CH_ALERT("%s: encountered fatal error, operation suspended\n", 977 pr_alert("%s: encountered fatal error, operation suspended\n",
980 adapter->name); 978 adapter->name);
981} 979}
982 980
@@ -1020,7 +1018,7 @@ static int __devinit init_one(struct pci_dev *pdev,
1020 return err; 1018 return err;
1021 1019
1022 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 1020 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1023 CH_ERR("%s: cannot find PCI device memory base address\n", 1021 pr_err("%s: cannot find PCI device memory base address\n",
1024 pci_name(pdev)); 1022 pci_name(pdev));
1025 err = -ENODEV; 1023 err = -ENODEV;
1026 goto out_disable_pdev; 1024 goto out_disable_pdev;
@@ -1030,20 +1028,20 @@ static int __devinit init_one(struct pci_dev *pdev,
1030 pci_using_dac = 1; 1028 pci_using_dac = 1;
1031 1029
1032 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 1030 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
1033 CH_ERR("%s: unable to obtain 64-bit DMA for " 1031 pr_err("%s: unable to obtain 64-bit DMA for "
1034 "consistent allocations\n", pci_name(pdev)); 1032 "consistent allocations\n", pci_name(pdev));
1035 err = -ENODEV; 1033 err = -ENODEV;
1036 goto out_disable_pdev; 1034 goto out_disable_pdev;
1037 } 1035 }
1038 1036
1039 } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { 1037 } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
1040 CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev)); 1038 pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
1041 goto out_disable_pdev; 1039 goto out_disable_pdev;
1042 } 1040 }
1043 1041
1044 err = pci_request_regions(pdev, DRV_NAME); 1042 err = pci_request_regions(pdev, DRV_NAME);
1045 if (err) { 1043 if (err) {
1046 CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev)); 1044 pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
1047 goto out_disable_pdev; 1045 goto out_disable_pdev;
1048 } 1046 }
1049 1047
@@ -1071,7 +1069,7 @@ static int __devinit init_one(struct pci_dev *pdev,
1071 1069
1072 adapter->regs = ioremap(mmio_start, mmio_len); 1070 adapter->regs = ioremap(mmio_start, mmio_len);
1073 if (!adapter->regs) { 1071 if (!adapter->regs) {
1074 CH_ERR("%s: cannot map device registers\n", 1072 pr_err("%s: cannot map device registers\n",
1075 pci_name(pdev)); 1073 pci_name(pdev));
1076 err = -ENOMEM; 1074 err = -ENOMEM;
1077 goto out_free_dev; 1075 goto out_free_dev;
@@ -1150,8 +1148,8 @@ static int __devinit init_one(struct pci_dev *pdev,
1150 for (i = 0; i < bi->port_number; ++i) { 1148 for (i = 0; i < bi->port_number; ++i) {
1151 err = register_netdev(adapter->port[i].dev); 1149 err = register_netdev(adapter->port[i].dev);
1152 if (err) 1150 if (err)
1153 CH_WARN("%s: cannot register net device %s, skipping\n", 1151 pr_warning("%s: cannot register net device %s, skipping\n",
1154 pci_name(pdev), adapter->port[i].dev->name); 1152 pci_name(pdev), adapter->port[i].dev->name);
1155 else { 1153 else {
1156 /* 1154 /*
1157 * Change the name we use for messages to the name of 1155 * Change the name we use for messages to the name of
@@ -1164,7 +1162,7 @@ static int __devinit init_one(struct pci_dev *pdev,
1164 } 1162 }
1165 } 1163 }
1166 if (!adapter->registered_device_map) { 1164 if (!adapter->registered_device_map) {
1167 CH_ERR("%s: could not register any net devices\n", 1165 pr_err("%s: could not register any net devices\n",
1168 pci_name(pdev)); 1166 pci_name(pdev));
1169 goto out_release_adapter_res; 1167 goto out_release_adapter_res;
1170 } 1168 }
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c
index 1e0749e000b0..639ff1955739 100644
--- a/drivers/net/chelsio/espi.c
+++ b/drivers/net/chelsio/espi.c
@@ -76,7 +76,7 @@ static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr,
76 } while (busy && --attempts); 76 } while (busy && --attempts);
77 77
78 if (busy) 78 if (busy)
79 CH_ERR("%s: TRICN write timed out\n", adapter->name); 79 pr_err("%s: TRICN write timed out\n", adapter->name);
80 80
81 return busy; 81 return busy;
82} 82}
@@ -86,7 +86,7 @@ static int tricn_init(adapter_t *adapter)
86 int i, sme = 1; 86 int i, sme = 1;
87 87
88 if (!(readl(adapter->regs + A_ESPI_RX_RESET) & F_RX_CLK_STATUS)) { 88 if (!(readl(adapter->regs + A_ESPI_RX_RESET) & F_RX_CLK_STATUS)) {
89 CH_ERR("%s: ESPI clock not ready\n", adapter->name); 89 pr_err("%s: ESPI clock not ready\n", adapter->name);
90 return -1; 90 return -1;
91 } 91 }
92 92
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
index 2117c4fbb107..a6eb30a6e2b9 100644
--- a/drivers/net/chelsio/pm3393.c
+++ b/drivers/net/chelsio/pm3393.c
@@ -251,8 +251,9 @@ static int pm3393_interrupt_handler(struct cmac *cmac)
251 /* Read the master interrupt status register. */ 251 /* Read the master interrupt status register. */
252 pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, 252 pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS,
253 &master_intr_status); 253 &master_intr_status);
254 CH_DBG(cmac->adapter, INTR, "PM3393 intr cause 0x%x\n", 254 if (netif_msg_intr(cmac->adapter))
255 master_intr_status); 255 dev_dbg(&cmac->adapter->pdev->dev, "PM3393 intr cause 0x%x\n",
256 master_intr_status);
256 257
257 /* TBD XXX Lets just clear everything for now */ 258 /* TBD XXX Lets just clear everything for now */
258 pm3393_interrupt_clear(cmac); 259 pm3393_interrupt_clear(cmac);
@@ -375,12 +376,12 @@ static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
375 rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN; 376 rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
376 } else if (t1_rx_mode_mc_cnt(rm)) { 377 } else if (t1_rx_mode_mc_cnt(rm)) {
377 /* Accept one or more multicast(s). */ 378 /* Accept one or more multicast(s). */
378 u8 *addr; 379 struct dev_mc_list *dmi;
379 int bit; 380 int bit;
380 u16 mc_filter[4] = { 0, }; 381 u16 mc_filter[4] = { 0, };
381 382
382 while ((addr = t1_get_next_mcaddr(rm))) { 383 netdev_for_each_mc_addr(dmi, t1_get_netdev(rm)) {
383 bit = (ether_crc(ETH_ALEN, addr) >> 23) & 0x3f; /* bit[23:28] */ 384 bit = (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 23) & 0x3f; /* bit[23:28] */
384 mc_filter[bit >> 4] |= 1 << (bit & 0xf); 385 mc_filter[bit >> 4] |= 1 << (bit & 0xf);
385 } 386 }
386 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]); 387 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
@@ -776,11 +777,12 @@ static int pm3393_mac_reset(adapter_t * adapter)
776 successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock 777 successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock
777 && is_xaui_mabc_pll_locked); 778 && is_xaui_mabc_pll_locked);
778 779
779 CH_DBG(adapter, HW, 780 if (netif_msg_hw(adapter))
780 "PM3393 HW reset %d: pl4_reset 0x%x, val 0x%x, " 781 dev_dbg(&adapter->pdev->dev,
781 "is_pl4_outof_lock 0x%x, xaui_locked 0x%x\n", 782 "PM3393 HW reset %d: pl4_reset 0x%x, val 0x%x, "
782 i, is_pl4_reset_finished, val, is_pl4_outof_lock, 783 "is_pl4_outof_lock 0x%x, xaui_locked 0x%x\n",
783 is_xaui_mabc_pll_locked); 784 i, is_pl4_reset_finished, val,
785 is_pl4_outof_lock, is_xaui_mabc_pll_locked);
784 } 786 }
785 return successful_reset ? 0 : 1; 787 return successful_reset ? 0 : 1;
786} 788}
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 109d2783e4d8..71384114a4ed 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -267,7 +267,7 @@ struct sge {
267 struct sk_buff *espibug_skb[MAX_NPORTS]; 267 struct sk_buff *espibug_skb[MAX_NPORTS];
268 u32 sge_control; /* shadow value of sge control reg */ 268 u32 sge_control; /* shadow value of sge control reg */
269 struct sge_intr_counts stats; 269 struct sge_intr_counts stats;
270 struct sge_port_stats *port_stats[MAX_NPORTS]; 270 struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
271 struct sched *tx_sched; 271 struct sched *tx_sched;
272 struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp; 272 struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
273}; 273};
@@ -953,7 +953,7 @@ int t1_sge_intr_error_handler(struct sge *sge)
953 sge->stats.respQ_empty++; 953 sge->stats.respQ_empty++;
954 if (cause & F_RESPQ_OVERFLOW) { 954 if (cause & F_RESPQ_OVERFLOW) {
955 sge->stats.respQ_overflow++; 955 sge->stats.respQ_overflow++;
956 CH_ALERT("%s: SGE response queue overflow\n", 956 pr_alert("%s: SGE response queue overflow\n",
957 adapter->name); 957 adapter->name);
958 } 958 }
959 if (cause & F_FL_EXHAUSTED) { 959 if (cause & F_FL_EXHAUSTED) {
@@ -962,12 +962,12 @@ int t1_sge_intr_error_handler(struct sge *sge)
962 } 962 }
963 if (cause & F_PACKET_TOO_BIG) { 963 if (cause & F_PACKET_TOO_BIG) {
964 sge->stats.pkt_too_big++; 964 sge->stats.pkt_too_big++;
965 CH_ALERT("%s: SGE max packet size exceeded\n", 965 pr_alert("%s: SGE max packet size exceeded\n",
966 adapter->name); 966 adapter->name);
967 } 967 }
968 if (cause & F_PACKET_MISMATCH) { 968 if (cause & F_PACKET_MISMATCH) {
969 sge->stats.pkt_mismatch++; 969 sge->stats.pkt_mismatch++;
970 CH_ALERT("%s: SGE packet mismatch\n", adapter->name); 970 pr_alert("%s: SGE packet mismatch\n", adapter->name);
971 } 971 }
972 if (cause & SGE_INT_FATAL) 972 if (cause & SGE_INT_FATAL)
973 t1_fatal_err(adapter); 973 t1_fatal_err(adapter);
@@ -1101,7 +1101,7 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
1101 1101
1102 pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr), 1102 pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
1103 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); 1103 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
1104 CH_ERR("%s: unexpected offload packet, cmd %u\n", 1104 pr_err("%s: unexpected offload packet, cmd %u\n",
1105 adapter->name, *skb->data); 1105 adapter->name, *skb->data);
1106 recycle_fl_buf(fl, fl->cidx); 1106 recycle_fl_buf(fl, fl->cidx);
1107} 1107}
@@ -1687,7 +1687,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
1687 netif_stop_queue(dev); 1687 netif_stop_queue(dev);
1688 set_bit(dev->if_port, &sge->stopped_tx_queues); 1688 set_bit(dev->if_port, &sge->stopped_tx_queues);
1689 sge->stats.cmdQ_full[2]++; 1689 sge->stats.cmdQ_full[2]++;
1690 CH_ERR("%s: Tx ring full while queue awake!\n", 1690 pr_err("%s: Tx ring full while queue awake!\n",
1691 adapter->name); 1691 adapter->name);
1692 } 1692 }
1693 spin_unlock(&q->lock); 1693 spin_unlock(&q->lock);
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 17720c6e5bfe..53bde15fc94d 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -90,7 +90,7 @@ int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
90 tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1, 90 tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
91 TPI_ATTEMPTS, 3); 91 TPI_ATTEMPTS, 3);
92 if (tpi_busy) 92 if (tpi_busy)
93 CH_ALERT("%s: TPI write to 0x%x failed\n", 93 pr_alert("%s: TPI write to 0x%x failed\n",
94 adapter->name, addr); 94 adapter->name, addr);
95 return tpi_busy; 95 return tpi_busy;
96} 96}
@@ -118,7 +118,7 @@ int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
118 tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1, 118 tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
119 TPI_ATTEMPTS, 3); 119 TPI_ATTEMPTS, 3);
120 if (tpi_busy) 120 if (tpi_busy)
121 CH_ALERT("%s: TPI read from 0x%x failed\n", 121 pr_alert("%s: TPI read from 0x%x failed\n",
122 adapter->name, addr); 122 adapter->name, addr);
123 else 123 else
124 *valp = readl(adapter->regs + A_TPI_RD_DATA); 124 *valp = readl(adapter->regs + A_TPI_RD_DATA);
@@ -262,7 +262,7 @@ static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg)
262 udelay(10); 262 udelay(10);
263 } while (busy && --attempts); 263 } while (busy && --attempts);
264 if (busy) 264 if (busy)
265 CH_ALERT("%s: MDIO operation timed out\n", adapter->name); 265 pr_alert("%s: MDIO operation timed out\n", adapter->name);
266 return busy; 266 return busy;
267} 267}
268 268
@@ -528,7 +528,7 @@ static const struct board_info t1_board[] = {
528 528
529}; 529};
530 530
531struct pci_device_id t1_pci_tbl[] = { 531DEFINE_PCI_DEVICE_TABLE(t1_pci_tbl) = {
532 CH_DEVICE(8, 0, CH_BRD_T110_1CU), 532 CH_DEVICE(8, 0, CH_BRD_T110_1CU),
533 CH_DEVICE(8, 1, CH_BRD_T110_1CU), 533 CH_DEVICE(8, 1, CH_BRD_T110_1CU),
534 CH_DEVICE(7, 0, CH_BRD_N110_1F), 534 CH_DEVICE(7, 0, CH_BRD_N110_1F),
@@ -581,7 +581,7 @@ int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data)
581 } while (!(val & F_VPD_OP_FLAG) && --i); 581 } while (!(val & F_VPD_OP_FLAG) && --i);
582 582
583 if (!(val & F_VPD_OP_FLAG)) { 583 if (!(val & F_VPD_OP_FLAG)) {
584 CH_ERR("%s: reading EEPROM address 0x%x failed\n", 584 pr_err("%s: reading EEPROM address 0x%x failed\n",
585 adapter->name, addr); 585 adapter->name, addr);
586 return -EIO; 586 return -EIO;
587 } 587 }
@@ -734,8 +734,9 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
734 break; 734 break;
735 case CHBT_BOARD_8000: 735 case CHBT_BOARD_8000:
736 case CHBT_BOARD_CHT110: 736 case CHBT_BOARD_CHT110:
737 CH_DBG(adapter, INTR, "External interrupt cause 0x%x\n", 737 if (netif_msg_intr(adapter))
738 cause); 738 dev_dbg(&adapter->pdev->dev,
739 "External interrupt cause 0x%x\n", cause);
739 if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */ 740 if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */
740 struct cmac *mac = adapter->port[0].mac; 741 struct cmac *mac = adapter->port[0].mac;
741 742
@@ -746,8 +747,9 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
746 747
747 t1_tpi_read(adapter, 748 t1_tpi_read(adapter,
748 A_ELMER0_GPI_STAT, &mod_detect); 749 A_ELMER0_GPI_STAT, &mod_detect);
749 CH_MSG(adapter, INFO, LINK, "XPAK %s\n", 750 if (netif_msg_link(adapter))
750 mod_detect ? "removed" : "inserted"); 751 dev_info(&adapter->pdev->dev, "XPAK %s\n",
752 mod_detect ? "removed" : "inserted");
751 } 753 }
752 break; 754 break;
753#ifdef CONFIG_CHELSIO_T1_COUGAR 755#ifdef CONFIG_CHELSIO_T1_COUGAR
@@ -1084,7 +1086,7 @@ static void __devinit init_link_config(struct link_config *lc,
1084 1086
1085#ifdef CONFIG_CHELSIO_T1_COUGAR 1087#ifdef CONFIG_CHELSIO_T1_COUGAR
1086 if (bi->clock_cspi && !(adapter->cspi = t1_cspi_create(adapter))) { 1088 if (bi->clock_cspi && !(adapter->cspi = t1_cspi_create(adapter))) {
1087 CH_ERR("%s: CSPI initialization failed\n", 1089 pr_err("%s: CSPI initialization failed\n",
1088 adapter->name); 1090 adapter->name);
1089 goto error; 1091 goto error;
1090 } 1092 }
@@ -1105,20 +1107,20 @@ int __devinit t1_init_sw_modules(adapter_t *adapter,
1105 1107
1106 adapter->sge = t1_sge_create(adapter, &adapter->params.sge); 1108 adapter->sge = t1_sge_create(adapter, &adapter->params.sge);
1107 if (!adapter->sge) { 1109 if (!adapter->sge) {
1108 CH_ERR("%s: SGE initialization failed\n", 1110 pr_err("%s: SGE initialization failed\n",
1109 adapter->name); 1111 adapter->name);
1110 goto error; 1112 goto error;
1111 } 1113 }
1112 1114
1113 if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) { 1115 if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) {
1114 CH_ERR("%s: ESPI initialization failed\n", 1116 pr_err("%s: ESPI initialization failed\n",
1115 adapter->name); 1117 adapter->name);
1116 goto error; 1118 goto error;
1117 } 1119 }
1118 1120
1119 adapter->tp = t1_tp_create(adapter, &adapter->params.tp); 1121 adapter->tp = t1_tp_create(adapter, &adapter->params.tp);
1120 if (!adapter->tp) { 1122 if (!adapter->tp) {
1121 CH_ERR("%s: TP initialization failed\n", 1123 pr_err("%s: TP initialization failed\n",
1122 adapter->name); 1124 adapter->name);
1123 goto error; 1125 goto error;
1124 } 1126 }
@@ -1138,14 +1140,14 @@ int __devinit t1_init_sw_modules(adapter_t *adapter,
1138 adapter->port[i].phy = bi->gphy->create(adapter->port[i].dev, 1140 adapter->port[i].phy = bi->gphy->create(adapter->port[i].dev,
1139 phy_addr, bi->mdio_ops); 1141 phy_addr, bi->mdio_ops);
1140 if (!adapter->port[i].phy) { 1142 if (!adapter->port[i].phy) {
1141 CH_ERR("%s: PHY %d initialization failed\n", 1143 pr_err("%s: PHY %d initialization failed\n",
1142 adapter->name, i); 1144 adapter->name, i);
1143 goto error; 1145 goto error;
1144 } 1146 }
1145 1147
1146 adapter->port[i].mac = mac = bi->gmac->create(adapter, i); 1148 adapter->port[i].mac = mac = bi->gmac->create(adapter, i);
1147 if (!mac) { 1149 if (!mac) {
1148 CH_ERR("%s: MAC %d initialization failed\n", 1150 pr_err("%s: MAC %d initialization failed\n",
1149 adapter->name, i); 1151 adapter->name, i);
1150 goto error; 1152 goto error;
1151 } 1153 }
@@ -1157,7 +1159,7 @@ int __devinit t1_init_sw_modules(adapter_t *adapter,
1157 if (!t1_is_asic(adapter) || bi->chip_mac == CHBT_MAC_DUMMY) 1159 if (!t1_is_asic(adapter) || bi->chip_mac == CHBT_MAC_DUMMY)
1158 mac->ops->macaddress_get(mac, hw_addr); 1160 mac->ops->macaddress_get(mac, hw_addr);
1159 else if (vpd_macaddress_get(adapter, i, hw_addr)) { 1161 else if (vpd_macaddress_get(adapter, i, hw_addr)) {
1160 CH_ERR("%s: could not read MAC address from VPD ROM\n", 1162 pr_err("%s: could not read MAC address from VPD ROM\n",
1161 adapter->port[i].dev->name); 1163 adapter->port[i].dev->name);
1162 goto error; 1164 goto error;
1163 } 1165 }
diff --git a/drivers/net/chelsio/vsc7326.c b/drivers/net/chelsio/vsc7326.c
index 99b51f61fe77..c844111cffeb 100644
--- a/drivers/net/chelsio/vsc7326.c
+++ b/drivers/net/chelsio/vsc7326.c
@@ -48,14 +48,14 @@ static void vsc_read(adapter_t *adapter, u32 addr, u32 *val)
48 i++; 48 i++;
49 } while (((status & 1) == 0) && (i < 50)); 49 } while (((status & 1) == 0) && (i < 50));
50 if (i == 50) 50 if (i == 50)
51 CH_ERR("Invalid tpi read from MAC, breaking loop.\n"); 51 pr_err("Invalid tpi read from MAC, breaking loop.\n");
52 52
53 t1_tpi_read(adapter, (REG_LOCAL_DATA << 2) + 4, &vlo); 53 t1_tpi_read(adapter, (REG_LOCAL_DATA << 2) + 4, &vlo);
54 t1_tpi_read(adapter, REG_LOCAL_DATA << 2, &vhi); 54 t1_tpi_read(adapter, REG_LOCAL_DATA << 2, &vhi);
55 55
56 *val = (vhi << 16) | vlo; 56 *val = (vhi << 16) | vlo;
57 57
58 /* CH_ERR("rd: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n", 58 /* pr_err("rd: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n",
59 ((addr&0xe000)>>13), ((addr&0x1e00)>>9), 59 ((addr&0xe000)>>13), ((addr&0x1e00)>>9),
60 ((addr&0x01fe)>>1), *val); */ 60 ((addr&0x01fe)>>1), *val); */
61 spin_unlock_bh(&adapter->mac_lock); 61 spin_unlock_bh(&adapter->mac_lock);
@@ -66,7 +66,7 @@ static void vsc_write(adapter_t *adapter, u32 addr, u32 data)
66 spin_lock_bh(&adapter->mac_lock); 66 spin_lock_bh(&adapter->mac_lock);
67 t1_tpi_write(adapter, (addr << 2) + 4, data & 0xFFFF); 67 t1_tpi_write(adapter, (addr << 2) + 4, data & 0xFFFF);
68 t1_tpi_write(adapter, addr << 2, (data >> 16) & 0xFFFF); 68 t1_tpi_write(adapter, addr << 2, (data >> 16) & 0xFFFF);
69 /* CH_ERR("wr: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n", 69 /* pr_err("wr: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n",
70 ((addr&0xe000)>>13), ((addr&0x1e00)>>9), 70 ((addr&0xe000)>>13), ((addr&0x1e00)>>9),
71 ((addr&0x01fe)>>1), data); */ 71 ((addr&0x01fe)>>1), data); */
72 spin_unlock_bh(&adapter->mac_lock); 72 spin_unlock_bh(&adapter->mac_lock);
@@ -225,7 +225,7 @@ static void run_table(adapter_t *adapter, struct init_table *ib, int len)
225 for (i = 0; i < len; i++) { 225 for (i = 0; i < len; i++) {
226 if (ib[i].addr == INITBLOCK_SLEEP) { 226 if (ib[i].addr == INITBLOCK_SLEEP) {
227 udelay( ib[i].data ); 227 udelay( ib[i].data );
228 CH_ERR("sleep %d us\n",ib[i].data); 228 pr_err("sleep %d us\n",ib[i].data);
229 } else 229 } else
230 vsc_write( adapter, ib[i].addr, ib[i].data ); 230 vsc_write( adapter, ib[i].addr, ib[i].data );
231 } 231 }
@@ -241,7 +241,7 @@ static int bist_rd(adapter_t *adapter, int moduleid, int address)
241 (address != 0x2) && 241 (address != 0x2) &&
242 (address != 0xd) && 242 (address != 0xd) &&
243 (address != 0xe)) 243 (address != 0xe))
244 CH_ERR("No bist address: 0x%x\n", address); 244 pr_err("No bist address: 0x%x\n", address);
245 245
246 data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) | 246 data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) |
247 ((moduleid & 0xff) << 0)); 247 ((moduleid & 0xff) << 0));
@@ -251,9 +251,9 @@ static int bist_rd(adapter_t *adapter, int moduleid, int address)
251 251
252 vsc_read(adapter, REG_RAM_BIST_RESULT, &result); 252 vsc_read(adapter, REG_RAM_BIST_RESULT, &result);
253 if ((result & (1 << 9)) != 0x0) 253 if ((result & (1 << 9)) != 0x0)
254 CH_ERR("Still in bist read: 0x%x\n", result); 254 pr_err("Still in bist read: 0x%x\n", result);
255 else if ((result & (1 << 8)) != 0x0) 255 else if ((result & (1 << 8)) != 0x0)
256 CH_ERR("bist read error: 0x%x\n", result); 256 pr_err("bist read error: 0x%x\n", result);
257 257
258 return (result & 0xff); 258 return (result & 0xff);
259} 259}
@@ -268,10 +268,10 @@ static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
268 (address != 0x2) && 268 (address != 0x2) &&
269 (address != 0xd) && 269 (address != 0xd) &&
270 (address != 0xe)) 270 (address != 0xe))
271 CH_ERR("No bist address: 0x%x\n", address); 271 pr_err("No bist address: 0x%x\n", address);
272 272
273 if (value > 255) 273 if (value > 255)
274 CH_ERR("Suspicious write out of range value: 0x%x\n", value); 274 pr_err("Suspicious write out of range value: 0x%x\n", value);
275 275
276 data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) | 276 data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) |
277 ((moduleid & 0xff) << 0)); 277 ((moduleid & 0xff) << 0));
@@ -281,9 +281,9 @@ static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
281 281
282 vsc_read(adapter, REG_RAM_BIST_CMD, &result); 282 vsc_read(adapter, REG_RAM_BIST_CMD, &result);
283 if ((result & (1 << 27)) != 0x0) 283 if ((result & (1 << 27)) != 0x0)
284 CH_ERR("Still in bist write: 0x%x\n", result); 284 pr_err("Still in bist write: 0x%x\n", result);
285 else if ((result & (1 << 26)) != 0x0) 285 else if ((result & (1 << 26)) != 0x0)
286 CH_ERR("bist write error: 0x%x\n", result); 286 pr_err("bist write error: 0x%x\n", result);
287 287
288 return 0; 288 return 0;
289} 289}
@@ -306,7 +306,7 @@ static int check_bist(adapter_t *adapter, int moduleid)
306 column = ((bist_rd(adapter,moduleid, 0x0e)<<8) + 306 column = ((bist_rd(adapter,moduleid, 0x0e)<<8) +
307 (bist_rd(adapter,moduleid, 0x0d))); 307 (bist_rd(adapter,moduleid, 0x0d)));
308 if ((result & 3) != 0x3) 308 if ((result & 3) != 0x3)
309 CH_ERR("Result: 0x%x BIST error in ram %d, column: 0x%04x\n", 309 pr_err("Result: 0x%x BIST error in ram %d, column: 0x%04x\n",
310 result, moduleid, column); 310 result, moduleid, column);
311 return 0; 311 return 0;
312} 312}
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 4332b3a2fafb..9781942992e9 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -1,6 +1,6 @@
1/* cnic.c: Broadcom CNIC core network driver. 1/* cnic.c: Broadcom CNIC core network driver.
2 * 2 *
3 * Copyright (c) 2006-2009 Broadcom Corporation 3 * Copyright (c) 2006-2010 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -10,6 +10,8 @@
10 * Modified and maintained by: Michael Chan <mchan@broadcom.com> 10 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
11 */ 11 */
12 12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
13#include <linux/module.h> 15#include <linux/module.h>
14 16
15#include <linux/kernel.h> 17#include <linux/kernel.h>
@@ -47,7 +49,6 @@
47#include "cnic_defs.h" 49#include "cnic_defs.h"
48 50
49#define DRV_MODULE_NAME "cnic" 51#define DRV_MODULE_NAME "cnic"
50#define PFX DRV_MODULE_NAME ": "
51 52
52static char version[] __devinitdata = 53static char version[] __devinitdata =
53 "Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n"; 54 "Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
@@ -326,6 +327,12 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
326 if (l5_cid >= MAX_CM_SK_TBL_SZ) 327 if (l5_cid >= MAX_CM_SK_TBL_SZ)
327 break; 328 break;
328 329
330 rcu_read_lock();
331 if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
332 rc = -ENODEV;
333 rcu_read_unlock();
334 break;
335 }
329 csk = &cp->csk_tbl[l5_cid]; 336 csk = &cp->csk_tbl[l5_cid];
330 csk_hold(csk); 337 csk_hold(csk);
331 if (cnic_in_use(csk)) { 338 if (cnic_in_use(csk)) {
@@ -340,6 +347,7 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
340 cnic_cm_set_pg(csk); 347 cnic_cm_set_pg(csk);
341 } 348 }
342 csk_put(csk); 349 csk_put(csk);
350 rcu_read_unlock();
343 rc = 0; 351 rc = 0;
344 } 352 }
345 } 353 }
@@ -409,14 +417,13 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
409 struct cnic_dev *dev; 417 struct cnic_dev *dev;
410 418
411 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { 419 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
412 printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n", 420 pr_err("%s: Bad type %d\n", __func__, ulp_type);
413 ulp_type);
414 return -EINVAL; 421 return -EINVAL;
415 } 422 }
416 mutex_lock(&cnic_lock); 423 mutex_lock(&cnic_lock);
417 if (cnic_ulp_tbl[ulp_type]) { 424 if (cnic_ulp_tbl[ulp_type]) {
418 printk(KERN_ERR PFX "cnic_register_driver: Type %d has already " 425 pr_err("%s: Type %d has already been registered\n",
419 "been registered\n", ulp_type); 426 __func__, ulp_type);
420 mutex_unlock(&cnic_lock); 427 mutex_unlock(&cnic_lock);
421 return -EBUSY; 428 return -EBUSY;
422 } 429 }
@@ -455,15 +462,14 @@ int cnic_unregister_driver(int ulp_type)
455 int i = 0; 462 int i = 0;
456 463
457 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { 464 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
458 printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n", 465 pr_err("%s: Bad type %d\n", __func__, ulp_type);
459 ulp_type);
460 return -EINVAL; 466 return -EINVAL;
461 } 467 }
462 mutex_lock(&cnic_lock); 468 mutex_lock(&cnic_lock);
463 ulp_ops = cnic_ulp_tbl[ulp_type]; 469 ulp_ops = cnic_ulp_tbl[ulp_type];
464 if (!ulp_ops) { 470 if (!ulp_ops) {
465 printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not " 471 pr_err("%s: Type %d has not been registered\n",
466 "been registered\n", ulp_type); 472 __func__, ulp_type);
467 goto out_unlock; 473 goto out_unlock;
468 } 474 }
469 read_lock(&cnic_dev_lock); 475 read_lock(&cnic_dev_lock);
@@ -471,8 +477,8 @@ int cnic_unregister_driver(int ulp_type)
471 struct cnic_local *cp = dev->cnic_priv; 477 struct cnic_local *cp = dev->cnic_priv;
472 478
473 if (rcu_dereference(cp->ulp_ops[ulp_type])) { 479 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
474 printk(KERN_ERR PFX "cnic_unregister_driver: Type %d " 480 pr_err("%s: Type %d still has devices registered\n",
475 "still has devices registered\n", ulp_type); 481 __func__, ulp_type);
476 read_unlock(&cnic_dev_lock); 482 read_unlock(&cnic_dev_lock);
477 goto out_unlock; 483 goto out_unlock;
478 } 484 }
@@ -492,8 +498,7 @@ int cnic_unregister_driver(int ulp_type)
492 } 498 }
493 499
494 if (atomic_read(&ulp_ops->ref_count) != 0) 500 if (atomic_read(&ulp_ops->ref_count) != 0)
495 printk(KERN_WARNING PFX "%s: Failed waiting for ref count to go" 501 netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
496 " to zero.\n", dev->netdev->name);
497 return 0; 502 return 0;
498 503
499out_unlock: 504out_unlock:
@@ -511,20 +516,19 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
511 struct cnic_ulp_ops *ulp_ops; 516 struct cnic_ulp_ops *ulp_ops;
512 517
513 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { 518 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
514 printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n", 519 pr_err("%s: Bad type %d\n", __func__, ulp_type);
515 ulp_type);
516 return -EINVAL; 520 return -EINVAL;
517 } 521 }
518 mutex_lock(&cnic_lock); 522 mutex_lock(&cnic_lock);
519 if (cnic_ulp_tbl[ulp_type] == NULL) { 523 if (cnic_ulp_tbl[ulp_type] == NULL) {
520 printk(KERN_ERR PFX "cnic_register_device: Driver with type %d " 524 pr_err("%s: Driver with type %d has not been registered\n",
521 "has not been registered\n", ulp_type); 525 __func__, ulp_type);
522 mutex_unlock(&cnic_lock); 526 mutex_unlock(&cnic_lock);
523 return -EAGAIN; 527 return -EAGAIN;
524 } 528 }
525 if (rcu_dereference(cp->ulp_ops[ulp_type])) { 529 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
526 printk(KERN_ERR PFX "cnic_register_device: Type %d has already " 530 pr_err("%s: Type %d has already been registered to this device\n",
527 "been registered to this device\n", ulp_type); 531 __func__, ulp_type);
528 mutex_unlock(&cnic_lock); 532 mutex_unlock(&cnic_lock);
529 return -EBUSY; 533 return -EBUSY;
530 } 534 }
@@ -552,8 +556,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
552 int i = 0; 556 int i = 0;
553 557
554 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { 558 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
555 printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n", 559 pr_err("%s: Bad type %d\n", __func__, ulp_type);
556 ulp_type);
557 return -EINVAL; 560 return -EINVAL;
558 } 561 }
559 mutex_lock(&cnic_lock); 562 mutex_lock(&cnic_lock);
@@ -561,8 +564,8 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
561 rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL); 564 rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
562 cnic_put(dev); 565 cnic_put(dev);
563 } else { 566 } else {
564 printk(KERN_ERR PFX "cnic_unregister_device: device not " 567 pr_err("%s: device not registered to this ulp type %d\n",
565 "registered to this ulp type %d\n", ulp_type); 568 __func__, ulp_type);
566 mutex_unlock(&cnic_lock); 569 mutex_unlock(&cnic_lock);
567 return -EINVAL; 570 return -EINVAL;
568 } 571 }
@@ -576,8 +579,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
576 i++; 579 i++;
577 } 580 }
578 if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type])) 581 if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
579 printk(KERN_WARNING PFX "%s: Failed waiting for ULP up call" 582 netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
580 " to complete.\n", dev->netdev->name);
581 583
582 return 0; 584 return 0;
583} 585}
@@ -898,7 +900,8 @@ static int cnic_alloc_uio(struct cnic_dev *dev) {
898 uinfo->mem[0].memtype = UIO_MEM_PHYS; 900 uinfo->mem[0].memtype = UIO_MEM_PHYS;
899 901
900 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 902 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
901 uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK; 903 uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
904 PAGE_MASK;
902 if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) 905 if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
903 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; 906 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
904 else 907 else
@@ -1101,10 +1104,9 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1101 if (ret) 1104 if (ret)
1102 goto error; 1105 goto error;
1103 1106
1104 cp->bnx2x_status_blk = cp->status_blk;
1105 cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk; 1107 cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
1106 1108
1107 memset(cp->bnx2x_status_blk, 0, sizeof(struct host_status_block)); 1109 memset(cp->status_blk.bnx2x, 0, sizeof(*cp->status_blk.bnx2x));
1108 1110
1109 cp->l2_rx_ring_size = 15; 1111 cp->l2_rx_ring_size = 15;
1110 1112
@@ -1865,8 +1867,7 @@ static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
1865 } 1867 }
1866 1868
1867 if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) { 1869 if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
1868 printk(KERN_ERR PFX "%s: conn_buf size too big\n", 1870 netdev_err(dev->netdev, "conn_buf size too big\n");
1869 dev->netdev->name);
1870 return -ENOMEM; 1871 return -ENOMEM;
1871 } 1872 }
1872 conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 1873 conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
@@ -2026,13 +2027,13 @@ static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2026 break; 2027 break;
2027 default: 2028 default:
2028 ret = 0; 2029 ret = 0;
2029 printk(KERN_ERR PFX "%s: Unknown type of KWQE(0x%x)\n", 2030 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2030 dev->netdev->name, opcode); 2031 opcode);
2031 break; 2032 break;
2032 } 2033 }
2033 if (ret < 0) 2034 if (ret < 0)
2034 printk(KERN_ERR PFX "%s: KWQE(0x%x) failed\n", 2035 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2035 dev->netdev->name, opcode); 2036 opcode);
2036 i += work; 2037 i += work;
2037 } 2038 }
2038 return 0; 2039 return 0;
@@ -2074,8 +2075,8 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2074 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2) 2075 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2075 goto end; 2076 goto end;
2076 else { 2077 else {
2077 printk(KERN_ERR PFX "%s: Unknown type of KCQE(0x%x)\n", 2078 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2078 dev->netdev->name, kcqe_op_flag); 2079 kcqe_op_flag);
2079 goto end; 2080 goto end;
2080 } 2081 }
2081 2082
@@ -2204,7 +2205,7 @@ static void cnic_service_bnx2_msix(unsigned long data)
2204{ 2205{
2205 struct cnic_dev *dev = (struct cnic_dev *) data; 2206 struct cnic_dev *dev = (struct cnic_dev *) data;
2206 struct cnic_local *cp = dev->cnic_priv; 2207 struct cnic_local *cp = dev->cnic_priv;
2207 struct status_block_msix *status_blk = cp->bnx2_status_blk; 2208 struct status_block_msix *status_blk = cp->status_blk.bnx2;
2208 u32 status_idx = status_blk->status_idx; 2209 u32 status_idx = status_blk->status_idx;
2209 u16 hw_prod, sw_prod; 2210 u16 hw_prod, sw_prod;
2210 int kcqe_cnt; 2211 int kcqe_cnt;
@@ -2250,7 +2251,7 @@ static irqreturn_t cnic_irq(int irq, void *dev_instance)
2250 if (cp->ack_int) 2251 if (cp->ack_int)
2251 cp->ack_int(dev); 2252 cp->ack_int(dev);
2252 2253
2253 prefetch(cp->status_blk); 2254 prefetch(cp->status_blk.gen);
2254 prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); 2255 prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2255 2256
2256 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) 2257 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
@@ -2291,7 +2292,7 @@ static void cnic_service_bnx2x_bh(unsigned long data)
2291 struct cnic_local *cp = dev->cnic_priv; 2292 struct cnic_local *cp = dev->cnic_priv;
2292 u16 hw_prod, sw_prod; 2293 u16 hw_prod, sw_prod;
2293 struct cstorm_status_block_c *sblk = 2294 struct cstorm_status_block_c *sblk =
2294 &cp->bnx2x_status_blk->c_status_block; 2295 &cp->status_blk.bnx2x->c_status_block;
2295 u32 status_idx = sblk->status_block_index; 2296 u32 status_idx = sblk->status_block_index;
2296 int kcqe_cnt; 2297 int kcqe_cnt;
2297 2298
@@ -2333,7 +2334,7 @@ static int cnic_service_bnx2x(void *data, void *status_blk)
2333 struct cnic_local *cp = dev->cnic_priv; 2334 struct cnic_local *cp = dev->cnic_priv;
2334 u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; 2335 u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
2335 2336
2336 prefetch(cp->status_blk); 2337 prefetch(cp->status_blk.bnx2x);
2337 prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); 2338 prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2338 2339
2339 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) 2340 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
@@ -2513,7 +2514,7 @@ static int cnic_cm_offload_pg(struct cnic_sock *csk)
2513 l4kwqe->sa5 = dev->mac_addr[5]; 2514 l4kwqe->sa5 = dev->mac_addr[5];
2514 2515
2515 l4kwqe->etype = ETH_P_IP; 2516 l4kwqe->etype = ETH_P_IP;
2516 l4kwqe->ipid_count = DEF_IPID_COUNT; 2517 l4kwqe->ipid_start = DEF_IPID_START;
2517 l4kwqe->host_opaque = csk->l5_cid; 2518 l4kwqe->host_opaque = csk->l5_cid;
2518 2519
2519 if (csk->vlan_id) { 2520 if (csk->vlan_id) {
@@ -2859,8 +2860,8 @@ static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
2859{ 2860{
2860 struct cnic_dev *dev = csk->dev; 2861 struct cnic_dev *dev = csk->dev;
2861 struct cnic_local *cp = dev->cnic_priv; 2862 struct cnic_local *cp = dev->cnic_priv;
2862 int is_v6, err, rc = -ENETUNREACH; 2863 int is_v6, rc = 0;
2863 struct dst_entry *dst; 2864 struct dst_entry *dst = NULL;
2864 struct net_device *realdev; 2865 struct net_device *realdev;
2865 u32 local_port; 2866 u32 local_port;
2866 2867
@@ -2876,39 +2877,31 @@ static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
2876 clear_bit(SK_F_IPV6, &csk->flags); 2877 clear_bit(SK_F_IPV6, &csk->flags);
2877 2878
2878 if (is_v6) { 2879 if (is_v6) {
2879#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
2880 set_bit(SK_F_IPV6, &csk->flags); 2880 set_bit(SK_F_IPV6, &csk->flags);
2881 err = cnic_get_v6_route(&saddr->remote.v6, &dst); 2881 cnic_get_v6_route(&saddr->remote.v6, &dst);
2882 if (err)
2883 return err;
2884
2885 if (!dst || dst->error || !dst->dev)
2886 goto err_out;
2887 2882
2888 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr, 2883 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
2889 sizeof(struct in6_addr)); 2884 sizeof(struct in6_addr));
2890 csk->dst_port = saddr->remote.v6.sin6_port; 2885 csk->dst_port = saddr->remote.v6.sin6_port;
2891 local_port = saddr->local.v6.sin6_port; 2886 local_port = saddr->local.v6.sin6_port;
2892#else
2893 return rc;
2894#endif
2895 2887
2896 } else { 2888 } else {
2897 err = cnic_get_v4_route(&saddr->remote.v4, &dst); 2889 cnic_get_v4_route(&saddr->remote.v4, &dst);
2898 if (err)
2899 return err;
2900
2901 if (!dst || dst->error || !dst->dev)
2902 goto err_out;
2903 2890
2904 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr; 2891 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
2905 csk->dst_port = saddr->remote.v4.sin_port; 2892 csk->dst_port = saddr->remote.v4.sin_port;
2906 local_port = saddr->local.v4.sin_port; 2893 local_port = saddr->local.v4.sin_port;
2907 } 2894 }
2908 2895
2909 csk->vlan_id = cnic_get_vlan(dst->dev, &realdev); 2896 csk->vlan_id = 0;
2910 if (realdev != dev->netdev) 2897 csk->mtu = dev->netdev->mtu;
2911 goto err_out; 2898 if (dst && dst->dev) {
2899 u16 vlan = cnic_get_vlan(dst->dev, &realdev);
2900 if (realdev == dev->netdev) {
2901 csk->vlan_id = vlan;
2902 csk->mtu = dst_mtu(dst);
2903 }
2904 }
2912 2905
2913 if (local_port >= CNIC_LOCAL_PORT_MIN && 2906 if (local_port >= CNIC_LOCAL_PORT_MIN &&
2914 local_port < CNIC_LOCAL_PORT_MAX) { 2907 local_port < CNIC_LOCAL_PORT_MAX) {
@@ -2926,9 +2919,6 @@ static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
2926 } 2919 }
2927 csk->src_port = local_port; 2920 csk->src_port = local_port;
2928 2921
2929 csk->mtu = dst_mtu(dst);
2930 rc = 0;
2931
2932err_out: 2922err_out:
2933 dst_release(dst); 2923 dst_release(dst);
2934 return rc; 2924 return rc;
@@ -3052,6 +3042,14 @@ static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3052 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3042 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3053 goto done; 3043 goto done;
3054 } 3044 }
3045 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3046 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3047 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3048 cnic_cm_upcall(cp, csk,
3049 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3050 goto done;
3051 }
3052
3055 csk->pg_cid = kcqe->pg_cid; 3053 csk->pg_cid = kcqe->pg_cid;
3056 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); 3054 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3057 cnic_cm_conn_req(csk); 3055 cnic_cm_conn_req(csk);
@@ -3089,6 +3087,13 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3089 } 3087 }
3090 3088
3091 switch (opcode) { 3089 switch (opcode) {
3090 case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
3091 if (l4kcqe->status != 0) {
3092 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3093 cnic_cm_upcall(cp, csk,
3094 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3095 }
3096 break;
3092 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE: 3097 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
3093 if (l4kcqe->status == 0) 3098 if (l4kcqe->status == 0)
3094 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags); 3099 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
@@ -3099,7 +3104,10 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3099 break; 3104 break;
3100 3105
3101 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: 3106 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3102 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) 3107 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
3108 cnic_cm_upcall(cp, csk, opcode);
3109 break;
3110 } else if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
3103 csk->state = opcode; 3111 csk->state = opcode;
3104 /* fall through */ 3112 /* fall through */
3105 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 3113 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
@@ -3163,6 +3171,16 @@ static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
3163 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) 3171 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
3164 return 1; 3172 return 1;
3165 } 3173 }
3174 /* 57710+ only workaround to handle unsolicited RESET_COMP
3175 * which will be treated like a RESET RCVD notification
3176 * which triggers the clean up procedure
3177 */
3178 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
3179 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
3180 csk->state = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
3181 return 1;
3182 }
3183 }
3166 return 0; 3184 return 0;
3167} 3185}
3168 3186
@@ -3172,10 +3190,8 @@ static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
3172 struct cnic_local *cp = dev->cnic_priv; 3190 struct cnic_local *cp = dev->cnic_priv;
3173 3191
3174 clear_bit(SK_F_CONNECT_START, &csk->flags); 3192 clear_bit(SK_F_CONNECT_START, &csk->flags);
3175 if (cnic_ready_to_close(csk, opcode)) { 3193 cnic_close_conn(csk);
3176 cnic_close_conn(csk); 3194 cnic_cm_upcall(cp, csk, opcode);
3177 cnic_cm_upcall(cp, csk, opcode);
3178 }
3179} 3195}
3180 3196
3181static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev) 3197static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
@@ -3393,8 +3409,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
3393 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220); 3409 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
3394 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220); 3410 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
3395 3411
3396 cp->bnx2_status_blk = cp->status_blk; 3412 cp->last_status_idx = cp->status_blk.bnx2->status_idx;
3397 cp->last_status_idx = cp->bnx2_status_blk->status_idx;
3398 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix, 3413 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
3399 (unsigned long) dev); 3414 (unsigned long) dev);
3400 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, 3415 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
@@ -3403,7 +3418,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
3403 tasklet_disable(&cp->cnic_irq_task); 3418 tasklet_disable(&cp->cnic_irq_task);
3404 return err; 3419 return err;
3405 } 3420 }
3406 while (cp->bnx2_status_blk->status_completion_producer_index && 3421 while (cp->status_blk.bnx2->status_completion_producer_index &&
3407 i < 10) { 3422 i < 10) {
3408 CNIC_WR(dev, BNX2_HC_COALESCE_NOW, 3423 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
3409 1 << (11 + sblk_num)); 3424 1 << (11 + sblk_num));
@@ -3411,13 +3426,13 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
3411 i++; 3426 i++;
3412 barrier(); 3427 barrier();
3413 } 3428 }
3414 if (cp->bnx2_status_blk->status_completion_producer_index) { 3429 if (cp->status_blk.bnx2->status_completion_producer_index) {
3415 cnic_free_irq(dev); 3430 cnic_free_irq(dev);
3416 goto failed; 3431 goto failed;
3417 } 3432 }
3418 3433
3419 } else { 3434 } else {
3420 struct status_block *sblk = cp->status_blk; 3435 struct status_block *sblk = cp->status_blk.gen;
3421 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND); 3436 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
3422 int i = 0; 3437 int i = 0;
3423 3438
@@ -3435,8 +3450,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
3435 return 0; 3450 return 0;
3436 3451
3437failed: 3452failed:
3438 printk(KERN_ERR PFX "%s: " "KCQ index not resetting to 0.\n", 3453 netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
3439 dev->netdev->name);
3440 return -EBUSY; 3454 return -EBUSY;
3441} 3455}
3442 3456
@@ -3475,7 +3489,7 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
3475 int i; 3489 int i;
3476 struct tx_bd *txbd; 3490 struct tx_bd *txbd;
3477 dma_addr_t buf_map; 3491 dma_addr_t buf_map;
3478 struct status_block *s_blk = cp->status_blk; 3492 struct status_block *s_blk = cp->status_blk.gen;
3479 3493
3480 sb_id = cp->status_blk_num; 3494 sb_id = cp->status_blk_num;
3481 tx_cid = 20; 3495 tx_cid = 20;
@@ -3483,7 +3497,7 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
3483 cnic_init_context(dev, tx_cid + 1); 3497 cnic_init_context(dev, tx_cid + 1);
3484 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; 3498 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
3485 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3499 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3486 struct status_block_msix *sblk = cp->status_blk; 3500 struct status_block_msix *sblk = cp->status_blk.bnx2;
3487 3501
3488 tx_cid = TX_TSS_CID + sb_id - 1; 3502 tx_cid = TX_TSS_CID + sb_id - 1;
3489 cnic_init_context(dev, tx_cid); 3503 cnic_init_context(dev, tx_cid);
@@ -3539,7 +3553,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
3539 u32 cid_addr, sb_id, val, coal_reg, coal_val; 3553 u32 cid_addr, sb_id, val, coal_reg, coal_val;
3540 int i; 3554 int i;
3541 struct rx_bd *rxbd; 3555 struct rx_bd *rxbd;
3542 struct status_block *s_blk = cp->status_blk; 3556 struct status_block *s_blk = cp->status_blk.gen;
3543 3557
3544 sb_id = cp->status_blk_num; 3558 sb_id = cp->status_blk_num;
3545 cnic_init_context(dev, 2); 3559 cnic_init_context(dev, 2);
@@ -3547,7 +3561,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
3547 coal_reg = BNX2_HC_COMMAND; 3561 coal_reg = BNX2_HC_COMMAND;
3548 coal_val = CNIC_RD(dev, coal_reg); 3562 coal_val = CNIC_RD(dev, coal_reg);
3549 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3563 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3550 struct status_block_msix *sblk = cp->status_blk; 3564 struct status_block_msix *sblk = cp->status_blk.bnx2;
3551 3565
3552 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index; 3566 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
3553 coal_reg = BNX2_HC_COALESCE_NOW; 3567 coal_reg = BNX2_HC_COALESCE_NOW;
@@ -3646,7 +3660,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
3646{ 3660{
3647 struct cnic_local *cp = dev->cnic_priv; 3661 struct cnic_local *cp = dev->cnic_priv;
3648 struct cnic_eth_dev *ethdev = cp->ethdev; 3662 struct cnic_eth_dev *ethdev = cp->ethdev;
3649 struct status_block *sblk = cp->status_blk; 3663 struct status_block *sblk = cp->status_blk.gen;
3650 u32 val; 3664 u32 val;
3651 int err; 3665 int err;
3652 3666
@@ -3758,8 +3772,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
3758 3772
3759 err = cnic_init_bnx2_irq(dev); 3773 err = cnic_init_bnx2_irq(dev);
3760 if (err) { 3774 if (err) {
3761 printk(KERN_ERR PFX "%s: cnic_init_irq failed\n", 3775 netdev_err(dev->netdev, "cnic_init_irq failed\n");
3762 dev->netdev->name);
3763 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); 3776 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
3764 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0); 3777 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
3765 return err; 3778 return err;
@@ -4122,8 +4135,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4122 offsetof(struct cstorm_status_block_c, 4135 offsetof(struct cstorm_status_block_c,
4123 index_values[HC_INDEX_C_ISCSI_EQ_CONS])); 4136 index_values[HC_INDEX_C_ISCSI_EQ_CONS]));
4124 if (eq_idx != 0) { 4137 if (eq_idx != 0) {
4125 printk(KERN_ERR PFX "%s: EQ cons index %x != 0\n", 4138 netdev_err(dev->netdev, "EQ cons index %x != 0\n", eq_idx);
4126 dev->netdev->name, eq_idx);
4127 return -EBUSY; 4139 return -EBUSY;
4128 } 4140 }
4129 ret = cnic_init_bnx2x_irq(dev); 4141 ret = cnic_init_bnx2x_irq(dev);
@@ -4208,8 +4220,7 @@ static int cnic_register_netdev(struct cnic_dev *dev)
4208 4220
4209 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev); 4221 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
4210 if (err) 4222 if (err)
4211 printk(KERN_ERR PFX "%s: register_cnic failed\n", 4223 netdev_err(dev->netdev, "register_cnic failed\n");
4212 dev->netdev->name);
4213 4224
4214 return err; 4225 return err;
4215} 4226}
@@ -4238,13 +4249,12 @@ static int cnic_start_hw(struct cnic_dev *dev)
4238 cp->chip_id = ethdev->chip_id; 4249 cp->chip_id = ethdev->chip_id;
4239 pci_dev_get(dev->pcidev); 4250 pci_dev_get(dev->pcidev);
4240 cp->func = PCI_FUNC(dev->pcidev->devfn); 4251 cp->func = PCI_FUNC(dev->pcidev->devfn);
4241 cp->status_blk = ethdev->irq_arr[0].status_blk; 4252 cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
4242 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num; 4253 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
4243 4254
4244 err = cp->alloc_resc(dev); 4255 err = cp->alloc_resc(dev);
4245 if (err) { 4256 if (err) {
4246 printk(KERN_ERR PFX "%s: allocate resource failure\n", 4257 netdev_err(dev->netdev, "allocate resource failure\n");
4247 dev->netdev->name);
4248 goto err1; 4258 goto err1;
4249 } 4259 }
4250 4260
@@ -4326,10 +4336,9 @@ static void cnic_free_dev(struct cnic_dev *dev)
4326 i++; 4336 i++;
4327 } 4337 }
4328 if (atomic_read(&dev->ref_count) != 0) 4338 if (atomic_read(&dev->ref_count) != 0)
4329 printk(KERN_ERR PFX "%s: Failed waiting for ref count to go" 4339 netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
4330 " to zero.\n", dev->netdev->name);
4331 4340
4332 printk(KERN_INFO PFX "Removed CNIC device: %s\n", dev->netdev->name); 4341 netdev_info(dev->netdev, "Removed CNIC device\n");
4333 dev_put(dev->netdev); 4342 dev_put(dev->netdev);
4334 kfree(dev); 4343 kfree(dev);
4335} 4344}
@@ -4345,8 +4354,7 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
4345 4354
4346 cdev = kzalloc(alloc_size , GFP_KERNEL); 4355 cdev = kzalloc(alloc_size , GFP_KERNEL);
4347 if (cdev == NULL) { 4356 if (cdev == NULL) {
4348 printk(KERN_ERR PFX "%s: allocate dev struct failure\n", 4357 netdev_err(dev, "allocate dev struct failure\n");
4349 dev->name);
4350 return NULL; 4358 return NULL;
4351 } 4359 }
4352 4360
@@ -4364,7 +4372,7 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
4364 4372
4365 spin_lock_init(&cp->cnic_ulp_lock); 4373 spin_lock_init(&cp->cnic_ulp_lock);
4366 4374
4367 printk(KERN_INFO PFX "Added CNIC device: %s\n", dev->name); 4375 netdev_info(dev, "Added CNIC device\n");
4368 4376
4369 return cdev; 4377 return cdev;
4370} 4378}
@@ -4605,7 +4613,7 @@ static int __init cnic_init(void)
4605{ 4613{
4606 int rc = 0; 4614 int rc = 0;
4607 4615
4608 printk(KERN_INFO "%s", version); 4616 pr_info("%s", version);
4609 4617
4610 rc = register_netdevice_notifier(&cnic_netdev_notifier); 4618 rc = register_netdevice_notifier(&cnic_netdev_notifier);
4611 if (rc) { 4619 if (rc) {
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index 241d09acc0d4..a0d853dff983 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -1,6 +1,6 @@
1/* cnic.h: Broadcom CNIC core network driver. 1/* cnic.h: Broadcom CNIC core network driver.
2 * 2 *
3 * Copyright (c) 2006-2009 Broadcom Corporation 3 * Copyright (c) 2006-2010 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -101,7 +101,7 @@ struct cnic_redirect_entry {
101#define BNX2X_KWQ_DATA(cp, x) \ 101#define BNX2X_KWQ_DATA(cp, x) \
102 &(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)] 102 &(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
103 103
104#define DEF_IPID_COUNT 0xc001 104#define DEF_IPID_START 0x8000
105 105
106#define DEF_KA_TIMEOUT 10000 106#define DEF_KA_TIMEOUT 10000
107#define DEF_KA_INTERVAL 300000 107#define DEF_KA_INTERVAL 300000
@@ -224,9 +224,12 @@ struct cnic_local {
224 u16 kcq_prod_idx; 224 u16 kcq_prod_idx;
225 u32 kcq_io_addr; 225 u32 kcq_io_addr;
226 226
227 void *status_blk; 227 union {
228 struct status_block_msix *bnx2_status_blk; 228 void *gen;
229 struct host_status_block *bnx2x_status_blk; 229 struct status_block_msix *bnx2;
230 struct host_status_block *bnx2x;
231 } status_blk;
232
230 struct host_def_status_block *bnx2x_def_status_blk; 233 struct host_def_status_block *bnx2x_def_status_blk;
231 234
232 u32 status_blk_num; 235 u32 status_blk_num;
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
index 9827b278dc7c..7ce694d41b6b 100644
--- a/drivers/net/cnic_defs.h
+++ b/drivers/net/cnic_defs.h
@@ -1,7 +1,7 @@
1 1
2/* cnic.c: Broadcom CNIC core network driver. 2/* cnic.c: Broadcom CNIC core network driver.
3 * 3 *
4 * Copyright (c) 2006-2009 Broadcom Corporation 4 * Copyright (c) 2006-2010 Broadcom Corporation
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 8aaf98bdd4f7..110c62072e6f 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -1,6 +1,6 @@
1/* cnic_if.h: Broadcom CNIC core network driver. 1/* cnic_if.h: Broadcom CNIC core network driver.
2 * 2 *
3 * Copyright (c) 2006 Broadcom Corporation 3 * Copyright (c) 2006-2010 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -12,8 +12,8 @@
12#ifndef CNIC_IF_H 12#ifndef CNIC_IF_H
13#define CNIC_IF_H 13#define CNIC_IF_H
14 14
15#define CNIC_MODULE_VERSION "2.1.0" 15#define CNIC_MODULE_VERSION "2.1.1"
16#define CNIC_MODULE_RELDATE "Oct 10, 2009" 16#define CNIC_MODULE_RELDATE "Feb 22, 2010"
17 17
18#define CNIC_ULP_RDMA 0 18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1 19#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index bf2072e54200..b85c81f60d10 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -328,7 +328,6 @@ static int cpmac_config(struct net_device *dev, struct ifmap *map)
328static void cpmac_set_multicast_list(struct net_device *dev) 328static void cpmac_set_multicast_list(struct net_device *dev)
329{ 329{
330 struct dev_mc_list *iter; 330 struct dev_mc_list *iter;
331 int i;
332 u8 tmp; 331 u8 tmp;
333 u32 mbp, bit, hash[2] = { 0, }; 332 u32 mbp, bit, hash[2] = { 0, };
334 struct cpmac_priv *priv = netdev_priv(dev); 333 struct cpmac_priv *priv = netdev_priv(dev);
@@ -348,8 +347,7 @@ static void cpmac_set_multicast_list(struct net_device *dev)
348 * cpmac uses some strange mac address hashing 347 * cpmac uses some strange mac address hashing
349 * (not crc32) 348 * (not crc32)
350 */ 349 */
351 for (i = 0, iter = dev->mc_list; i < dev->mc_count; 350 netdev_for_each_mc_addr(iter, dev) {
352 i++, iter = iter->next) {
353 bit = 0; 351 bit = 0;
354 tmp = iter->dmi_addr[0]; 352 tmp = iter->dmi_addr[0];
355 bit ^= (tmp >> 2) ^ (tmp << 4); 353 bit ^= (tmp >> 2) ^ (tmp << 4);
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index a24be34a3f7a..dd24aadb778c 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1564,7 +1564,7 @@ static void
1564set_multicast_list(struct net_device *dev) 1564set_multicast_list(struct net_device *dev)
1565{ 1565{
1566 struct net_local *lp = netdev_priv(dev); 1566 struct net_local *lp = netdev_priv(dev);
1567 int num_addr = dev->mc_count; 1567 int num_addr = netdev_mc_count(dev);
1568 unsigned long int lo_bits; 1568 unsigned long int lo_bits;
1569 unsigned long int hi_bits; 1569 unsigned long int hi_bits;
1570 1570
@@ -1596,13 +1596,12 @@ set_multicast_list(struct net_device *dev)
1596 } else { 1596 } else {
1597 /* MC mode, receive normal and MC packets */ 1597 /* MC mode, receive normal and MC packets */
1598 char hash_ix; 1598 char hash_ix;
1599 struct dev_mc_list *dmi = dev->mc_list; 1599 struct dev_mc_list *dmi;
1600 int i;
1601 char *baddr; 1600 char *baddr;
1602 1601
1603 lo_bits = 0x00000000ul; 1602 lo_bits = 0x00000000ul;
1604 hi_bits = 0x00000000ul; 1603 hi_bits = 0x00000000ul;
1605 for (i = 0; i < num_addr; i++) { 1604 netdev_for_each_mc_addr(dmi, dev) {
1606 /* Calculate the hash index for the GA registers */ 1605 /* Calculate the hash index for the GA registers */
1607 1606
1608 hash_ix = 0; 1607 hash_ix = 0;
@@ -1632,7 +1631,6 @@ set_multicast_list(struct net_device *dev)
1632 } else { 1631 } else {
1633 lo_bits |= (1 << hash_ix); 1632 lo_bits |= (1 << hash_ix);
1634 } 1633 }
1635 dmi = dmi->next;
1636 } 1634 }
1637 /* Disable individual receive */ 1635 /* Disable individual receive */
1638 SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard); 1636 SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 0e79cef95c0a..14624019ce71 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1785,7 +1785,7 @@ static void set_multicast_list(struct net_device *dev)
1785 { 1785 {
1786 lp->rx_mode = RX_ALL_ACCEPT; 1786 lp->rx_mode = RX_ALL_ACCEPT;
1787 } 1787 }
1788 else if((dev->flags&IFF_ALLMULTI)||dev->mc_list) 1788 else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
1789 { 1789 {
1790 /* The multicast-accept list is initialized to accept-all, and we 1790 /* The multicast-accept list is initialized to accept-all, and we
1791 rely on higher-level filtering for now. */ 1791 rely on higher-level filtering for now. */
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 6ff356d4c7ab..fe08a004b0dd 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -67,32 +67,6 @@
67/* Additional NETIF_MSG_* categories */ 67/* Additional NETIF_MSG_* categories */
68#define NETIF_MSG_MMIO 0x8000000 68#define NETIF_MSG_MMIO 0x8000000
69 69
70struct t3_rx_mode {
71 struct net_device *dev;
72 struct dev_mc_list *mclist;
73 unsigned int idx;
74};
75
76static inline void init_rx_mode(struct t3_rx_mode *p, struct net_device *dev,
77 struct dev_mc_list *mclist)
78{
79 p->dev = dev;
80 p->mclist = mclist;
81 p->idx = 0;
82}
83
84static inline u8 *t3_get_next_mcaddr(struct t3_rx_mode *rm)
85{
86 u8 *addr = NULL;
87
88 if (rm->mclist && rm->idx < rm->dev->mc_count) {
89 addr = rm->mclist->dmi_addr;
90 rm->mclist = rm->mclist->next;
91 rm->idx++;
92 }
93 return addr;
94}
95
96enum { 70enum {
97 MAX_NPORTS = 2, /* max # of ports */ 71 MAX_NPORTS = 2, /* max # of ports */
98 MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */ 72 MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */
@@ -746,7 +720,7 @@ void t3_mac_enable_exact_filters(struct cmac *mac);
746int t3_mac_enable(struct cmac *mac, int which); 720int t3_mac_enable(struct cmac *mac, int which);
747int t3_mac_disable(struct cmac *mac, int which); 721int t3_mac_disable(struct cmac *mac, int which);
748int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu); 722int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
749int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm); 723int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev);
750int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]); 724int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
751int t3_mac_set_num_ucast(struct cmac *mac, int n); 725int t3_mac_set_num_ucast(struct cmac *mac, int n);
752const struct mac_stats *t3_mac_update_stats(struct cmac *mac); 726const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 89bec9c3c141..6fd968abb073 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -80,7 +80,7 @@ enum {
80#define CH_DEVICE(devid, idx) \ 80#define CH_DEVICE(devid, idx) \
81 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx } 81 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
82 82
83static const struct pci_device_id cxgb3_pci_tbl[] = { 83static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
84 CH_DEVICE(0x20, 0), /* PE9000 */ 84 CH_DEVICE(0x20, 0), /* PE9000 */
85 CH_DEVICE(0x21, 1), /* T302E */ 85 CH_DEVICE(0x21, 1), /* T302E */
86 CH_DEVICE(0x22, 2), /* T310E */ 86 CH_DEVICE(0x22, 2), /* T310E */
@@ -324,11 +324,9 @@ void t3_os_phymod_changed(struct adapter *adap, int port_id)
324 324
325static void cxgb_set_rxmode(struct net_device *dev) 325static void cxgb_set_rxmode(struct net_device *dev)
326{ 326{
327 struct t3_rx_mode rm;
328 struct port_info *pi = netdev_priv(dev); 327 struct port_info *pi = netdev_priv(dev);
329 328
330 init_rx_mode(&rm, dev, dev->mc_list); 329 t3_mac_set_rx_mode(&pi->mac, dev);
331 t3_mac_set_rx_mode(&pi->mac, &rm);
332} 330}
333 331
334/** 332/**
@@ -339,17 +337,15 @@ static void cxgb_set_rxmode(struct net_device *dev)
339 */ 337 */
340static void link_start(struct net_device *dev) 338static void link_start(struct net_device *dev)
341{ 339{
342 struct t3_rx_mode rm;
343 struct port_info *pi = netdev_priv(dev); 340 struct port_info *pi = netdev_priv(dev);
344 struct cmac *mac = &pi->mac; 341 struct cmac *mac = &pi->mac;
345 342
346 init_rx_mode(&rm, dev, dev->mc_list);
347 t3_mac_reset(mac); 343 t3_mac_reset(mac);
348 t3_mac_set_num_ucast(mac, MAX_MAC_IDX); 344 t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
349 t3_mac_set_mtu(mac, dev->mtu); 345 t3_mac_set_mtu(mac, dev->mtu);
350 t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr); 346 t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
351 t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr); 347 t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
352 t3_mac_set_rx_mode(mac, &rm); 348 t3_mac_set_rx_mode(mac, dev);
353 t3_link_start(&pi->phy, mac, &pi->link_config); 349 t3_link_start(&pi->phy, mac, &pi->link_config);
354 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); 350 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
355} 351}
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 75064eea1d87..9498361119d6 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1252,7 +1252,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
1252 struct mtutab mtutab; 1252 struct mtutab mtutab;
1253 unsigned int l2t_capacity; 1253 unsigned int l2t_capacity;
1254 1254
1255 t = kcalloc(1, sizeof(*t), GFP_KERNEL); 1255 t = kzalloc(sizeof(*t), GFP_KERNEL);
1256 if (!t) 1256 if (!t)
1257 return -ENOMEM; 1257 return -ENOMEM;
1258 1258
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 318a018ca7c5..048205903741 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -480,6 +480,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
480{ 480{
481 if (q->pend_cred >= q->credits / 4) { 481 if (q->pend_cred >= q->credits / 4) {
482 q->pend_cred = 0; 482 q->pend_cred = 0;
483 wmb();
483 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id)); 484 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
484 } 485 }
485} 486}
@@ -2286,11 +2287,14 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
2286 while (likely(budget_left && is_new_response(r, q))) { 2287 while (likely(budget_left && is_new_response(r, q))) {
2287 int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled; 2288 int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
2288 struct sk_buff *skb = NULL; 2289 struct sk_buff *skb = NULL;
2289 u32 len, flags = ntohl(r->flags); 2290 u32 len, flags;
2290 __be32 rss_hi = *(const __be32 *)r, 2291 __be32 rss_hi, rss_lo;
2291 rss_lo = r->rss_hdr.rss_hash_val;
2292 2292
2293 rmb();
2293 eth = r->rss_hdr.opcode == CPL_RX_PKT; 2294 eth = r->rss_hdr.opcode == CPL_RX_PKT;
2295 rss_hi = *(const __be32 *)r;
2296 rss_lo = r->rss_hdr.rss_hash_val;
2297 flags = ntohl(r->flags);
2294 2298
2295 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) { 2299 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2296 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC); 2300 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
@@ -2501,7 +2505,10 @@ static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2501 refill_rspq(adap, q, q->credits); 2505 refill_rspq(adap, q, q->credits);
2502 q->credits = 0; 2506 q->credits = 0;
2503 } 2507 }
2504 } while (is_new_response(r, q) && is_pure_response(r)); 2508 if (!is_new_response(r, q))
2509 break;
2510 rmb();
2511 } while (is_pure_response(r));
2505 2512
2506 if (sleeping) 2513 if (sleeping)
2507 check_ring_db(adap, qs, sleeping); 2514 check_ring_db(adap, qs, sleeping);
@@ -2535,6 +2542,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2535 2542
2536 if (!is_new_response(r, q)) 2543 if (!is_new_response(r, q))
2537 return -1; 2544 return -1;
2545 rmb();
2538 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) { 2546 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2539 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | 2547 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2540 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx)); 2548 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 032cfe065570..3ab9f51918aa 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1262,7 +1262,8 @@ void t3_link_changed(struct adapter *adapter, int port_id)
1262 lc->fc = fc; 1262 lc->fc = fc;
1263 } 1263 }
1264 1264
1265 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc); 1265 t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
1266 speed, duplex, fc);
1266} 1267}
1267 1268
1268void t3_link_fault(struct adapter *adapter, int port_id) 1269void t3_link_fault(struct adapter *adapter, int port_id)
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
index 0109ee4f2f91..c142a2132e9f 100644
--- a/drivers/net/cxgb3/xgmac.c
+++ b/drivers/net/cxgb3/xgmac.c
@@ -297,29 +297,30 @@ static int hash_hw_addr(const u8 * addr)
297 return hash; 297 return hash;
298} 298}
299 299
300int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm) 300int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev)
301{ 301{
302 u32 val, hash_lo, hash_hi; 302 u32 val, hash_lo, hash_hi;
303 struct adapter *adap = mac->adapter; 303 struct adapter *adap = mac->adapter;
304 unsigned int oft = mac->offset; 304 unsigned int oft = mac->offset;
305 305
306 val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES; 306 val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
307 if (rm->dev->flags & IFF_PROMISC) 307 if (dev->flags & IFF_PROMISC)
308 val |= F_COPYALLFRAMES; 308 val |= F_COPYALLFRAMES;
309 t3_write_reg(adap, A_XGM_RX_CFG + oft, val); 309 t3_write_reg(adap, A_XGM_RX_CFG + oft, val);
310 310
311 if (rm->dev->flags & IFF_ALLMULTI) 311 if (dev->flags & IFF_ALLMULTI)
312 hash_lo = hash_hi = 0xffffffff; 312 hash_lo = hash_hi = 0xffffffff;
313 else { 313 else {
314 u8 *addr; 314 struct dev_mc_list *dmi;
315 int exact_addr_idx = mac->nucast; 315 int exact_addr_idx = mac->nucast;
316 316
317 hash_lo = hash_hi = 0; 317 hash_lo = hash_hi = 0;
318 while ((addr = t3_get_next_mcaddr(rm))) 318 netdev_for_each_mc_addr(dmi, dev)
319 if (exact_addr_idx < EXACT_ADDR_FILTERS) 319 if (exact_addr_idx < EXACT_ADDR_FILTERS)
320 set_addr_filter(mac, exact_addr_idx++, addr); 320 set_addr_filter(mac, exact_addr_idx++,
321 dmi->dmi_addr);
321 else { 322 else {
322 int hash = hash_hw_addr(addr); 323 int hash = hash_hw_addr(dmi->dmi_addr);
323 324
324 if (hash < 32) 325 if (hash < 32)
325 hash_lo |= (1 << hash); 326 hash_lo |= (1 << hash);
@@ -353,6 +354,9 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
353 * packet size register includes header, but not FCS. 354 * packet size register includes header, but not FCS.
354 */ 355 */
355 mtu += 14; 356 mtu += 14;
357 if (mtu > 1536)
358 mtu += 4;
359
356 if (mtu > MAX_FRAME_SIZE - 4) 360 if (mtu > MAX_FRAME_SIZE - 4)
357 return -EINVAL; 361 return -EINVAL;
358 t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu); 362 t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 1605bc225b0c..1ac9440eb3fb 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -957,19 +957,18 @@ static void emac_dev_mcast_set(struct net_device *ndev)
957 } else { 957 } else {
958 mbp_enable = (mbp_enable & ~EMAC_MBP_RXPROMISC); 958 mbp_enable = (mbp_enable & ~EMAC_MBP_RXPROMISC);
959 if ((ndev->flags & IFF_ALLMULTI) || 959 if ((ndev->flags & IFF_ALLMULTI) ||
960 (ndev->mc_count > EMAC_DEF_MAX_MULTICAST_ADDRESSES)) { 960 netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
961 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); 961 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
962 emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL); 962 emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
963 } 963 }
964 if (ndev->mc_count > 0) { 964 if (!netdev_mc_empty(ndev)) {
965 struct dev_mc_list *mc_ptr; 965 struct dev_mc_list *mc_ptr;
966 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); 966 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
967 emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL); 967 emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
968 /* program multicast address list into EMAC hardware */ 968 /* program multicast address list into EMAC hardware */
969 for (mc_ptr = ndev->mc_list; mc_ptr; 969 netdev_for_each_mc_addr(mc_ptr, ndev) {
970 mc_ptr = mc_ptr->next) {
971 emac_add_mcast(priv, EMAC_MULTICAST_ADD, 970 emac_add_mcast(priv, EMAC_MULTICAST_ADD,
972 (u8 *)mc_ptr->dmi_addr); 971 (u8 *) mc_ptr->dmi_addr);
973 } 972 }
974 } else { 973 } else {
975 mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST); 974 mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST);
@@ -2683,8 +2682,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
2683 priv->emac_base_phys = res->start + pdata->ctrl_reg_offset; 2682 priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
2684 size = res->end - res->start + 1; 2683 size = res->end - res->start + 1;
2685 if (!request_mem_region(res->start, size, ndev->name)) { 2684 if (!request_mem_region(res->start, size, ndev->name)) {
2686 dev_err(emac_dev, "DaVinci EMAC: failed request_mem_region() \ 2685 dev_err(emac_dev, "DaVinci EMAC: failed request_mem_region() for regs\n");
2687 for regs\n");
2688 rc = -ENXIO; 2686 rc = -ENXIO;
2689 goto probe_quit; 2687 goto probe_quit;
2690 } 2688 }
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index 45794f6cb0f6..a0a6830b5e6d 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -464,7 +464,7 @@ static int de620_close(struct net_device *dev)
464 464
465static void de620_set_multicast_list(struct net_device *dev) 465static void de620_set_multicast_list(struct net_device *dev)
466{ 466{
467 if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC)) 467 if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
468 { /* Enable promiscuous mode */ 468 { /* Enable promiscuous mode */
469 de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL); 469 de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL);
470 } 470 }
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index be9590253aa1..8cf3cc6f20e2 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -940,9 +940,8 @@ static void lance_load_multicast(struct net_device *dev)
940{ 940{
941 struct lance_private *lp = netdev_priv(dev); 941 struct lance_private *lp = netdev_priv(dev);
942 volatile u16 *ib = (volatile u16 *)dev->mem_start; 942 volatile u16 *ib = (volatile u16 *)dev->mem_start;
943 struct dev_mc_list *dmi = dev->mc_list; 943 struct dev_mc_list *dmi;
944 char *addrs; 944 char *addrs;
945 int i;
946 u32 crc; 945 u32 crc;
947 946
948 /* set all multicast bits */ 947 /* set all multicast bits */
@@ -960,9 +959,8 @@ static void lance_load_multicast(struct net_device *dev)
960 *lib_ptr(ib, filter[3], lp->type) = 0; 959 *lib_ptr(ib, filter[3], lp->type) = 0;
961 960
962 /* Add addresses */ 961 /* Add addresses */
963 for (i = 0; i < dev->mc_count; i++) { 962 netdev_for_each_mc_addr(dmi, dev) {
964 addrs = dmi->dmi_addr; 963 addrs = dmi->dmi_addr;
965 dmi = dmi->next;
966 964
967 /* multicast address? */ 965 /* multicast address? */
968 if (!(*addrs & 1)) 966 if (!(*addrs & 1))
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index 6a6ea038d7a3..ed53a8d45f4e 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -1052,12 +1052,9 @@ static int __devinit dfx_driver_init(struct net_device *dev,
1052 board_name = "DEFEA"; 1052 board_name = "DEFEA";
1053 if (dfx_bus_pci) 1053 if (dfx_bus_pci)
1054 board_name = "DEFPA"; 1054 board_name = "DEFPA";
1055 pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, " 1055 pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
1056 "Hardware addr = %02X-%02X-%02X-%02X-%02X-%02X\n",
1057 print_name, board_name, dfx_use_mmio ? "" : "I/O ", 1056 print_name, board_name, dfx_use_mmio ? "" : "I/O ",
1058 (long long)bar_start, dev->irq, 1057 (long long)bar_start, dev->irq, dev->dev_addr);
1059 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
1060 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
1061 1058
1062 /* 1059 /*
1063 * Get memory for descriptor block, consumer block, and other buffers 1060 * Get memory for descriptor block, consumer block, and other buffers
@@ -2230,7 +2227,7 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
2230 * perfect filtering will be used. 2227 * perfect filtering will be used.
2231 */ 2228 */
2232 2229
2233 if (dev->mc_count > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count)) 2230 if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
2234 { 2231 {
2235 bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */ 2232 bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */
2236 bp->mc_count = 0; /* Don't add mc addrs to CAM */ 2233 bp->mc_count = 0; /* Don't add mc addrs to CAM */
@@ -2238,17 +2235,16 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
2238 else 2235 else
2239 { 2236 {
2240 bp->group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC group prom mode */ 2237 bp->group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC group prom mode */
2241 bp->mc_count = dev->mc_count; /* Add mc addrs to CAM */ 2238 bp->mc_count = netdev_mc_count(dev); /* Add mc addrs to CAM */
2242 } 2239 }
2243 2240
2244 /* Copy addresses to multicast address table, then update adapter CAM */ 2241 /* Copy addresses to multicast address table, then update adapter CAM */
2245 2242
2246 dmi = dev->mc_list; /* point to first multicast addr */ 2243 i = 0;
2247 for (i=0; i < bp->mc_count; i++) 2244 netdev_for_each_mc_addr(dmi, dev)
2248 { 2245 memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
2249 memcpy(&bp->mc_table[i*FDDI_K_ALEN], dmi->dmi_addr, FDDI_K_ALEN); 2246 dmi->dmi_addr, FDDI_K_ALEN);
2250 dmi = dmi->next; /* point to next multicast addr */ 2247
2251 }
2252 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS) 2248 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2253 { 2249 {
2254 DBG_printk("%s: Could not update multicast address table!\n", dev->name); 2250 DBG_printk("%s: Could not update multicast address table!\n", dev->name);
@@ -3631,7 +3627,7 @@ static int __devinit dfx_pci_register(struct pci_dev *,
3631 const struct pci_device_id *); 3627 const struct pci_device_id *);
3632static void __devexit dfx_pci_unregister(struct pci_dev *); 3628static void __devexit dfx_pci_unregister(struct pci_dev *);
3633 3629
3634static struct pci_device_id dfx_pci_table[] = { 3630static DEFINE_PCI_DEVICE_TABLE(dfx_pci_table) = {
3635 { PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) }, 3631 { PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
3636 { } 3632 { }
3637}; 3633};
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 0c1f491d20bf..744c1928dfca 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1272,7 +1272,7 @@ static void set_multicast_list(struct net_device *dev)
1272static void SetMulticastFilter(struct net_device *dev) 1272static void SetMulticastFilter(struct net_device *dev)
1273{ 1273{
1274 struct depca_private *lp = netdev_priv(dev); 1274 struct depca_private *lp = netdev_priv(dev);
1275 struct dev_mc_list *dmi = dev->mc_list; 1275 struct dev_mc_list *dmi;
1276 char *addrs; 1276 char *addrs;
1277 int i, j, bit, byte; 1277 int i, j, bit, byte;
1278 u16 hashcode; 1278 u16 hashcode;
@@ -1287,9 +1287,8 @@ static void SetMulticastFilter(struct net_device *dev)
1287 lp->init_block.mcast_table[i] = 0; 1287 lp->init_block.mcast_table[i] = 0;
1288 } 1288 }
1289 /* Add multicast addresses */ 1289 /* Add multicast addresses */
1290 for (i = 0; i < dev->mc_count; i++) { /* for each address in the list */ 1290 netdev_for_each_mc_addr(dmi, dev) {
1291 addrs = dmi->dmi_addr; 1291 addrs = dmi->dmi_addr;
1292 dmi = dmi->next;
1293 if ((*addrs & 0x01) == 1) { /* multicast address? */ 1292 if ((*addrs & 0x01) == 1) { /* multicast address? */
1294 crc = ether_crc(ETH_ALEN, addrs); 1293 crc = ether_crc(ETH_ALEN, addrs);
1295 hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */ 1294 hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 2a8b6a7c0b87..b05bad829827 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1128,19 +1128,16 @@ set_multicast (struct net_device *dev)
1128 /* Receive all frames promiscuously. */ 1128 /* Receive all frames promiscuously. */
1129 rx_mode = ReceiveAllFrames; 1129 rx_mode = ReceiveAllFrames;
1130 } else if ((dev->flags & IFF_ALLMULTI) || 1130 } else if ((dev->flags & IFF_ALLMULTI) ||
1131 (dev->mc_count > multicast_filter_limit)) { 1131 (netdev_mc_count(dev) > multicast_filter_limit)) {
1132 /* Receive broadcast and multicast frames */ 1132 /* Receive broadcast and multicast frames */
1133 rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast; 1133 rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
1134 } else if (dev->mc_count > 0) { 1134 } else if (!netdev_mc_empty(dev)) {
1135 int i;
1136 struct dev_mc_list *mclist; 1135 struct dev_mc_list *mclist;
1137 /* Receive broadcast frames and multicast frames filtering 1136 /* Receive broadcast frames and multicast frames filtering
1138 by Hashtable */ 1137 by Hashtable */
1139 rx_mode = 1138 rx_mode =
1140 ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast; 1139 ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
1141 for (i=0, mclist = dev->mc_list; mclist && i < dev->mc_count; 1140 netdev_for_each_mc_addr(mclist, dev) {
1142 i++, mclist=mclist->next)
1143 {
1144 int bit, index = 0; 1141 int bit, index = 0;
1145 int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr); 1142 int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
1146 /* The inverted high significant 6 bits of CRC are 1143 /* The inverted high significant 6 bits of CRC are
diff --git a/drivers/net/dl2k.h b/drivers/net/dl2k.h
index 266ec8777ca8..7caab3d26a9e 100644
--- a/drivers/net/dl2k.h
+++ b/drivers/net/dl2k.h
@@ -537,7 +537,7 @@ struct netdev_private {
537 driver_data Data private to the driver. 537 driver_data Data private to the driver.
538*/ 538*/
539 539
540static const struct pci_device_id rio_pci_tbl[] = { 540static DEFINE_PCI_DEVICE_TABLE(rio_pci_tbl) = {
541 {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, }, 541 {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, },
542 {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, }, 542 {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, },
543 { } 543 { }
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index b37730065688..1c67f1138ca7 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -724,8 +724,7 @@ static void
724dm9000_hash_table(struct net_device *dev) 724dm9000_hash_table(struct net_device *dev)
725{ 725{
726 board_info_t *db = netdev_priv(dev); 726 board_info_t *db = netdev_priv(dev);
727 struct dev_mc_list *mcptr = dev->mc_list; 727 struct dev_mc_list *mcptr;
728 int mc_cnt = dev->mc_count;
729 int i, oft; 728 int i, oft;
730 u32 hash_val; 729 u32 hash_val;
731 u16 hash_table[4]; 730 u16 hash_table[4];
@@ -753,7 +752,7 @@ dm9000_hash_table(struct net_device *dev)
753 rcr |= RCR_ALL; 752 rcr |= RCR_ALL;
754 753
755 /* the multicast address in Hash Table : 64 bits */ 754 /* the multicast address in Hash Table : 64 bits */
756 for (i = 0; i < mc_cnt; i++, mcptr = mcptr->next) { 755 netdev_for_each_mc_addr(mcptr, dev) {
757 hash_val = ether_crc_le(6, mcptr->dmi_addr) & 0x3f; 756 hash_val = ether_crc_le(6, mcptr->dmi_addr) & 0x3f;
758 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); 757 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
759 } 758 }
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 839fb2b136d3..a26ccab057d5 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -208,7 +208,7 @@ MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
208#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ 208#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
209 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ 209 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
210 PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich } 210 PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
211static struct pci_device_id e100_id_table[] = { 211static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
212 INTEL_8255X_ETHERNET_DEVICE(0x1029, 0), 212 INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
213 INTEL_8255X_ETHERNET_DEVICE(0x1030, 0), 213 INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
214 INTEL_8255X_ETHERNET_DEVICE(0x1031, 3), 214 INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
@@ -1537,14 +1537,18 @@ static int e100_hw_init(struct nic *nic)
1537static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) 1537static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1538{ 1538{
1539 struct net_device *netdev = nic->netdev; 1539 struct net_device *netdev = nic->netdev;
1540 struct dev_mc_list *list = netdev->mc_list; 1540 struct dev_mc_list *list;
1541 u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS); 1541 u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
1542 1542
1543 cb->command = cpu_to_le16(cb_multi); 1543 cb->command = cpu_to_le16(cb_multi);
1544 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); 1544 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
1545 for (i = 0; list && i < count; i++, list = list->next) 1545 i = 0;
1546 memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr, 1546 netdev_for_each_mc_addr(list, netdev) {
1547 if (i == count)
1548 break;
1549 memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &list->dmi_addr,
1547 ETH_ALEN); 1550 ETH_ALEN);
1551 }
1548} 1552}
1549 1553
1550static void e100_set_multicast_list(struct net_device *netdev) 1554static void e100_set_multicast_list(struct net_device *netdev)
@@ -1552,7 +1556,7 @@ static void e100_set_multicast_list(struct net_device *netdev)
1552 struct nic *nic = netdev_priv(netdev); 1556 struct nic *nic = netdev_priv(netdev);
1553 1557
1554 DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n", 1558 DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
1555 netdev->mc_count, netdev->flags); 1559 netdev_mc_count(netdev), netdev->flags);
1556 1560
1557 if (netdev->flags & IFF_PROMISC) 1561 if (netdev->flags & IFF_PROMISC)
1558 nic->flags |= promiscuous; 1562 nic->flags |= promiscuous;
@@ -1560,7 +1564,7 @@ static void e100_set_multicast_list(struct net_device *netdev)
1560 nic->flags &= ~promiscuous; 1564 nic->flags &= ~promiscuous;
1561 1565
1562 if (netdev->flags & IFF_ALLMULTI || 1566 if (netdev->flags & IFF_ALLMULTI ||
1563 netdev->mc_count > E100_MAX_MULTICAST_ADDRS) 1567 netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
1564 nic->flags |= multicast_all; 1568 nic->flags |= multicast_all;
1565 else 1569 else
1566 nic->flags &= ~multicast_all; 1570 nic->flags &= ~multicast_all;
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index e8932db7ee77..9902b33b7160 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -349,6 +349,7 @@ extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
349extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter); 349extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
350extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter); 350extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
351extern void e1000_update_stats(struct e1000_adapter *adapter); 351extern void e1000_update_stats(struct e1000_adapter *adapter);
352extern bool e1000_has_link(struct e1000_adapter *adapter);
352extern void e1000_power_up_phy(struct e1000_adapter *); 353extern void e1000_power_up_phy(struct e1000_adapter *);
353extern void e1000_set_ethtool_ops(struct net_device *netdev); 354extern void e1000_set_ethtool_ops(struct net_device *netdev);
354extern void e1000_check_options(struct e1000_adapter *adapter); 355extern void e1000_check_options(struct e1000_adapter *adapter);
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 13e9ece16889..c67e93117271 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -215,6 +215,23 @@ static int e1000_set_settings(struct net_device *netdev,
215 return 0; 215 return 0;
216} 216}
217 217
218static u32 e1000_get_link(struct net_device *netdev)
219{
220 struct e1000_adapter *adapter = netdev_priv(netdev);
221
222 /*
223 * If the link is not reported up to netdev, interrupts are disabled,
224 * and so the physical link state may have changed since we last
225 * looked. Set get_link_status to make sure that the true link
226 * state is interrogated, rather than pulling a cached and possibly
227 * stale link state from the driver.
228 */
229 if (!netif_carrier_ok(netdev))
230 adapter->hw.get_link_status = 1;
231
232 return e1000_has_link(adapter);
233}
234
218static void e1000_get_pauseparam(struct net_device *netdev, 235static void e1000_get_pauseparam(struct net_device *netdev,
219 struct ethtool_pauseparam *pause) 236 struct ethtool_pauseparam *pause)
220{ 237{
@@ -1892,7 +1909,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
1892 .get_msglevel = e1000_get_msglevel, 1909 .get_msglevel = e1000_get_msglevel,
1893 .set_msglevel = e1000_set_msglevel, 1910 .set_msglevel = e1000_set_msglevel,
1894 .nway_reset = e1000_nway_reset, 1911 .nway_reset = e1000_nway_reset,
1895 .get_link = ethtool_op_get_link, 1912 .get_link = e1000_get_link,
1896 .get_eeprom_len = e1000_get_eeprom_len, 1913 .get_eeprom_len = e1000_get_eeprom_len,
1897 .get_eeprom = e1000_get_eeprom, 1914 .get_eeprom = e1000_get_eeprom,
1898 .set_eeprom = e1000_set_eeprom, 1915 .set_eeprom = e1000_set_eeprom,
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 765543663a4f..8be6faee43e6 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -42,7 +42,7 @@ static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation
42 * Macro expands to... 42 * Macro expands to...
43 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} 43 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
44 */ 44 */
45static struct pci_device_id e1000_pci_tbl[] = { 45static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
46 INTEL_E1000_ETHERNET_DEVICE(0x1000), 46 INTEL_E1000_ETHERNET_DEVICE(0x1000),
47 INTEL_E1000_ETHERNET_DEVICE(0x1001), 47 INTEL_E1000_ETHERNET_DEVICE(0x1001),
48 INTEL_E1000_ETHERNET_DEVICE(0x1004), 48 INTEL_E1000_ETHERNET_DEVICE(0x1004),
@@ -847,6 +847,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
847 goto err_pci_reg; 847 goto err_pci_reg;
848 848
849 pci_set_master(pdev); 849 pci_set_master(pdev);
850 err = pci_save_state(pdev);
851 if (err)
852 goto err_alloc_etherdev;
850 853
851 err = -ENOMEM; 854 err = -ENOMEM;
852 netdev = alloc_etherdev(sizeof(struct e1000_adapter)); 855 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
@@ -2127,7 +2130,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2127 rctl |= E1000_RCTL_VFE; 2130 rctl |= E1000_RCTL_VFE;
2128 } 2131 }
2129 2132
2130 if (netdev->uc.count > rar_entries - 1) { 2133 if (netdev_uc_count(netdev) > rar_entries - 1) {
2131 rctl |= E1000_RCTL_UPE; 2134 rctl |= E1000_RCTL_UPE;
2132 } else if (!(netdev->flags & IFF_PROMISC)) { 2135 } else if (!(netdev->flags & IFF_PROMISC)) {
2133 rctl &= ~E1000_RCTL_UPE; 2136 rctl &= ~E1000_RCTL_UPE;
@@ -2150,7 +2153,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2150 */ 2153 */
2151 i = 1; 2154 i = 1;
2152 if (use_uc) 2155 if (use_uc)
2153 list_for_each_entry(ha, &netdev->uc.list, list) { 2156 netdev_for_each_uc_addr(ha, netdev) {
2154 if (i == rar_entries) 2157 if (i == rar_entries)
2155 break; 2158 break;
2156 e1000_rar_set(hw, ha->addr, i++); 2159 e1000_rar_set(hw, ha->addr, i++);
@@ -2158,29 +2161,25 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2158 2161
2159 WARN_ON(i == rar_entries); 2162 WARN_ON(i == rar_entries);
2160 2163
2161 mc_ptr = netdev->mc_list; 2164 netdev_for_each_mc_addr(mc_ptr, netdev) {
2162 2165 if (i == rar_entries) {
2163 for (; i < rar_entries; i++) { 2166 /* load any remaining addresses into the hash table */
2164 if (mc_ptr) { 2167 u32 hash_reg, hash_bit, mta;
2165 e1000_rar_set(hw, mc_ptr->da_addr, i); 2168 hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
2166 mc_ptr = mc_ptr->next; 2169 hash_reg = (hash_value >> 5) & 0x7F;
2170 hash_bit = hash_value & 0x1F;
2171 mta = (1 << hash_bit);
2172 mcarray[hash_reg] |= mta;
2167 } else { 2173 } else {
2168 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); 2174 e1000_rar_set(hw, mc_ptr->da_addr, i++);
2169 E1000_WRITE_FLUSH();
2170 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2171 E1000_WRITE_FLUSH();
2172 } 2175 }
2173 } 2176 }
2174 2177
2175 /* load any remaining addresses into the hash table */ 2178 for (; i < rar_entries; i++) {
2176 2179 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2177 for (; mc_ptr; mc_ptr = mc_ptr->next) { 2180 E1000_WRITE_FLUSH();
2178 u32 hash_reg, hash_bit, mta; 2181 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2179 hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr); 2182 E1000_WRITE_FLUSH();
2180 hash_reg = (hash_value >> 5) & 0x7F;
2181 hash_bit = hash_value & 0x1F;
2182 mta = (1 << hash_bit);
2183 mcarray[hash_reg] |= mta;
2184 } 2183 }
2185 2184
2186 /* write the hash table completely, write from bottom to avoid 2185 /* write the hash table completely, write from bottom to avoid
@@ -2246,7 +2245,7 @@ static void e1000_82547_tx_fifo_stall(unsigned long data)
2246 } 2245 }
2247} 2246}
2248 2247
2249static bool e1000_has_link(struct e1000_adapter *adapter) 2248bool e1000_has_link(struct e1000_adapter *adapter)
2250{ 2249{
2251 struct e1000_hw *hw = &adapter->hw; 2250 struct e1000_hw *hw = &adapter->hw;
2252 bool link_active = false; 2251 bool link_active = false;
@@ -4613,6 +4612,7 @@ static int e1000_resume(struct pci_dev *pdev)
4613 4612
4614 pci_set_power_state(pdev, PCI_D0); 4613 pci_set_power_state(pdev, PCI_D0);
4615 pci_restore_state(pdev); 4614 pci_restore_state(pdev);
4615 pci_save_state(pdev);
4616 4616
4617 if (adapter->need_ioport) 4617 if (adapter->need_ioport)
4618 err = pci_enable_device(pdev); 4618 err = pci_enable_device(pdev);
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 02d67d047d96..3c95acb3a87d 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -267,8 +267,14 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
267 } 267 }
268 268
269 switch (hw->mac.type) { 269 switch (hw->mac.type) {
270 case e1000_82573:
271 func->set_lan_id = e1000_set_lan_id_single_port;
272 func->check_mng_mode = e1000e_check_mng_mode_generic;
273 func->led_on = e1000e_led_on_generic;
274 break;
270 case e1000_82574: 275 case e1000_82574:
271 case e1000_82583: 276 case e1000_82583:
277 func->set_lan_id = e1000_set_lan_id_single_port;
272 func->check_mng_mode = e1000_check_mng_mode_82574; 278 func->check_mng_mode = e1000_check_mng_mode_82574;
273 func->led_on = e1000_led_on_82574; 279 func->led_on = e1000_led_on_82574;
274 break; 280 break;
@@ -922,9 +928,12 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
922 ew32(IMC, 0xffffffff); 928 ew32(IMC, 0xffffffff);
923 icr = er32(ICR); 929 icr = er32(ICR);
924 930
925 if (hw->mac.type == e1000_82571 && 931 /* Install any alternate MAC address into RAR0 */
926 hw->dev_spec.e82571.alt_mac_addr_is_present) 932 ret_val = e1000_check_alt_mac_addr_generic(hw);
927 e1000e_set_laa_state_82571(hw, true); 933 if (ret_val)
934 return ret_val;
935
936 e1000e_set_laa_state_82571(hw, true);
928 937
929 /* Reinitialize the 82571 serdes link state machine */ 938 /* Reinitialize the 82571 serdes link state machine */
930 if (hw->phy.media_type == e1000_media_type_internal_serdes) 939 if (hw->phy.media_type == e1000_media_type_internal_serdes)
@@ -1225,32 +1234,6 @@ static s32 e1000_led_on_82574(struct e1000_hw *hw)
1225} 1234}
1226 1235
1227/** 1236/**
1228 * e1000_update_mc_addr_list_82571 - Update Multicast addresses
1229 * @hw: pointer to the HW structure
1230 * @mc_addr_list: array of multicast addresses to program
1231 * @mc_addr_count: number of multicast addresses to program
1232 * @rar_used_count: the first RAR register free to program
1233 * @rar_count: total number of supported Receive Address Registers
1234 *
1235 * Updates the Receive Address Registers and Multicast Table Array.
1236 * The caller must have a packed mc_addr_list of multicast addresses.
1237 * The parameter rar_count will usually be hw->mac.rar_entry_count
1238 * unless there are workarounds that change this.
1239 **/
1240static void e1000_update_mc_addr_list_82571(struct e1000_hw *hw,
1241 u8 *mc_addr_list,
1242 u32 mc_addr_count,
1243 u32 rar_used_count,
1244 u32 rar_count)
1245{
1246 if (e1000e_get_laa_state_82571(hw))
1247 rar_count--;
1248
1249 e1000e_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count,
1250 rar_used_count, rar_count);
1251}
1252
1253/**
1254 * e1000_setup_link_82571 - Setup flow control and link settings 1237 * e1000_setup_link_82571 - Setup flow control and link settings
1255 * @hw: pointer to the HW structure 1238 * @hw: pointer to the HW structure
1256 * 1239 *
@@ -1621,6 +1604,29 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
1621} 1604}
1622 1605
1623/** 1606/**
1607 * e1000_read_mac_addr_82571 - Read device MAC address
1608 * @hw: pointer to the HW structure
1609 **/
1610static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
1611{
1612 s32 ret_val = 0;
1613
1614 /*
1615 * If there's an alternate MAC address place it in RAR0
1616 * so that it will override the Si installed default perm
1617 * address.
1618 */
1619 ret_val = e1000_check_alt_mac_addr_generic(hw);
1620 if (ret_val)
1621 goto out;
1622
1623 ret_val = e1000_read_mac_addr_generic(hw);
1624
1625out:
1626 return ret_val;
1627}
1628
1629/**
1624 * e1000_power_down_phy_copper_82571 - Remove link during PHY power down 1630 * e1000_power_down_phy_copper_82571 - Remove link during PHY power down
1625 * @hw: pointer to the HW structure 1631 * @hw: pointer to the HW structure
1626 * 1632 *
@@ -1695,10 +1701,11 @@ static struct e1000_mac_operations e82571_mac_ops = {
1695 .cleanup_led = e1000e_cleanup_led_generic, 1701 .cleanup_led = e1000e_cleanup_led_generic,
1696 .clear_hw_cntrs = e1000_clear_hw_cntrs_82571, 1702 .clear_hw_cntrs = e1000_clear_hw_cntrs_82571,
1697 .get_bus_info = e1000e_get_bus_info_pcie, 1703 .get_bus_info = e1000e_get_bus_info_pcie,
1704 .set_lan_id = e1000_set_lan_id_multi_port_pcie,
1698 /* .get_link_up_info: media type dependent */ 1705 /* .get_link_up_info: media type dependent */
1699 /* .led_on: mac type dependent */ 1706 /* .led_on: mac type dependent */
1700 .led_off = e1000e_led_off_generic, 1707 .led_off = e1000e_led_off_generic,
1701 .update_mc_addr_list = e1000_update_mc_addr_list_82571, 1708 .update_mc_addr_list = e1000e_update_mc_addr_list_generic,
1702 .write_vfta = e1000_write_vfta_generic, 1709 .write_vfta = e1000_write_vfta_generic,
1703 .clear_vfta = e1000_clear_vfta_82571, 1710 .clear_vfta = e1000_clear_vfta_82571,
1704 .reset_hw = e1000_reset_hw_82571, 1711 .reset_hw = e1000_reset_hw_82571,
@@ -1706,6 +1713,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
1706 .setup_link = e1000_setup_link_82571, 1713 .setup_link = e1000_setup_link_82571,
1707 /* .setup_physical_interface: media type dependent */ 1714 /* .setup_physical_interface: media type dependent */
1708 .setup_led = e1000e_setup_led_generic, 1715 .setup_led = e1000e_setup_led_generic,
1716 .read_mac_addr = e1000_read_mac_addr_82571,
1709}; 1717};
1710 1718
1711static struct e1000_phy_operations e82_phy_ops_igp = { 1719static struct e1000_phy_operations e82_phy_ops_igp = {
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index e02e38221ed4..db05ec355749 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -460,6 +460,8 @@
460 */ 460 */
461#define E1000_RAR_ENTRIES 15 461#define E1000_RAR_ENTRIES 15
462#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ 462#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
463#define E1000_RAL_MAC_ADDR_LEN 4
464#define E1000_RAH_MAC_ADDR_LEN 2
463 465
464/* Error Codes */ 466/* Error Codes */
465#define E1000_ERR_NVM 1 467#define E1000_ERR_NVM 1
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index d236efaf7478..c2ec095d2163 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -459,7 +459,7 @@ extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
459extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); 459extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
460extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); 460extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
461extern void e1000e_update_stats(struct e1000_adapter *adapter); 461extern void e1000e_update_stats(struct e1000_adapter *adapter);
462extern bool e1000_has_link(struct e1000_adapter *adapter); 462extern bool e1000e_has_link(struct e1000_adapter *adapter);
463extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); 463extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
464extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); 464extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
465 465
@@ -503,6 +503,8 @@ extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
503extern s32 e1000e_led_on_generic(struct e1000_hw *hw); 503extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
504extern s32 e1000e_led_off_generic(struct e1000_hw *hw); 504extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
505extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw); 505extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
506extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
507extern void e1000_set_lan_id_single_port(struct e1000_hw *hw);
506extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex); 508extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex);
507extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex); 509extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
508extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw); 510extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
@@ -517,9 +519,7 @@ extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
517extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); 519extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
518extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, 520extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
519 u8 *mc_addr_list, 521 u8 *mc_addr_list,
520 u32 mc_addr_count, 522 u32 mc_addr_count);
521 u32 rar_used_count,
522 u32 rar_count);
523extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); 523extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
524extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); 524extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
525extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); 525extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
@@ -530,6 +530,7 @@ extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
530extern s32 e1000e_force_mac_fc(struct e1000_hw *hw); 530extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
531extern s32 e1000e_blink_led(struct e1000_hw *hw); 531extern s32 e1000e_blink_led(struct e1000_hw *hw);
532extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); 532extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
533extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
533extern void e1000e_reset_adaptive(struct e1000_hw *hw); 534extern void e1000e_reset_adaptive(struct e1000_hw *hw);
534extern void e1000e_update_adaptive(struct e1000_hw *hw); 535extern void e1000e_update_adaptive(struct e1000_hw *hw);
535 536
@@ -629,7 +630,15 @@ extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16
629extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw); 630extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
630extern void e1000e_release_nvm(struct e1000_hw *hw); 631extern void e1000e_release_nvm(struct e1000_hw *hw);
631extern void e1000e_reload_nvm(struct e1000_hw *hw); 632extern void e1000e_reload_nvm(struct e1000_hw *hw);
632extern s32 e1000e_read_mac_addr(struct e1000_hw *hw); 633extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
634
635static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
636{
637 if (hw->mac.ops.read_mac_addr)
638 return hw->mac.ops.read_mac_addr(hw);
639
640 return e1000_read_mac_addr_generic(hw);
641}
633 642
634static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) 643static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
635{ 644{
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index e2aa3b788564..27d21589a69a 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -246,6 +246,9 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
246 break; 246 break;
247 } 247 }
248 248
249 /* set lan id for port to determine which phy lock to use */
250 hw->mac.ops.set_lan_id(hw);
251
249 return 0; 252 return 0;
250} 253}
251 254
@@ -814,7 +817,9 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
814 ew32(IMC, 0xffffffff); 817 ew32(IMC, 0xffffffff);
815 icr = er32(ICR); 818 icr = er32(ICR);
816 819
817 return 0; 820 ret_val = e1000_check_alt_mac_addr_generic(hw);
821
822 return ret_val;
818} 823}
819 824
820/** 825/**
@@ -1340,6 +1345,29 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
1340} 1345}
1341 1346
1342/** 1347/**
1348 * e1000_read_mac_addr_80003es2lan - Read device MAC address
1349 * @hw: pointer to the HW structure
1350 **/
1351static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
1352{
1353 s32 ret_val = 0;
1354
1355 /*
1356 * If there's an alternate MAC address place it in RAR0
1357 * so that it will override the Si installed default perm
1358 * address.
1359 */
1360 ret_val = e1000_check_alt_mac_addr_generic(hw);
1361 if (ret_val)
1362 goto out;
1363
1364 ret_val = e1000_read_mac_addr_generic(hw);
1365
1366out:
1367 return ret_val;
1368}
1369
1370/**
1343 * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down 1371 * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
1344 * @hw: pointer to the HW structure 1372 * @hw: pointer to the HW structure
1345 * 1373 *
@@ -1403,12 +1431,14 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
1403} 1431}
1404 1432
1405static struct e1000_mac_operations es2_mac_ops = { 1433static struct e1000_mac_operations es2_mac_ops = {
1434 .read_mac_addr = e1000_read_mac_addr_80003es2lan,
1406 .id_led_init = e1000e_id_led_init, 1435 .id_led_init = e1000e_id_led_init,
1407 .check_mng_mode = e1000e_check_mng_mode_generic, 1436 .check_mng_mode = e1000e_check_mng_mode_generic,
1408 /* check_for_link dependent on media type */ 1437 /* check_for_link dependent on media type */
1409 .cleanup_led = e1000e_cleanup_led_generic, 1438 .cleanup_led = e1000e_cleanup_led_generic,
1410 .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan, 1439 .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan,
1411 .get_bus_info = e1000e_get_bus_info_pcie, 1440 .get_bus_info = e1000e_get_bus_info_pcie,
1441 .set_lan_id = e1000_set_lan_id_multi_port_pcie,
1412 .get_link_up_info = e1000_get_link_up_info_80003es2lan, 1442 .get_link_up_info = e1000_get_link_up_info_80003es2lan,
1413 .led_on = e1000e_led_on_generic, 1443 .led_on = e1000e_led_on_generic,
1414 .led_off = e1000e_led_off_generic, 1444 .led_off = e1000e_led_off_generic,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 0aa50c229c79..b33e3cbe9ab0 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -202,7 +202,7 @@ static u32 e1000_get_link(struct net_device *netdev)
202 if (!netif_carrier_ok(netdev)) 202 if (!netif_carrier_ok(netdev))
203 mac->get_link_status = 1; 203 mac->get_link_status = 1;
204 204
205 return e1000_has_link(adapter); 205 return e1000e_has_link(adapter);
206} 206}
207 207
208static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) 208static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index eccf29b75c41..8bdcd5f24eff 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -389,6 +389,9 @@ enum e1e_registers {
389 389
390#define E1000_FUNC_1 1 390#define E1000_FUNC_1 1
391 391
392#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
393#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
394
392enum e1000_mac_type { 395enum e1000_mac_type {
393 e1000_82571, 396 e1000_82571,
394 e1000_82572, 397 e1000_82572,
@@ -746,16 +749,18 @@ struct e1000_mac_operations {
746 void (*clear_hw_cntrs)(struct e1000_hw *); 749 void (*clear_hw_cntrs)(struct e1000_hw *);
747 void (*clear_vfta)(struct e1000_hw *); 750 void (*clear_vfta)(struct e1000_hw *);
748 s32 (*get_bus_info)(struct e1000_hw *); 751 s32 (*get_bus_info)(struct e1000_hw *);
752 void (*set_lan_id)(struct e1000_hw *);
749 s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); 753 s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
750 s32 (*led_on)(struct e1000_hw *); 754 s32 (*led_on)(struct e1000_hw *);
751 s32 (*led_off)(struct e1000_hw *); 755 s32 (*led_off)(struct e1000_hw *);
752 void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32); 756 void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
753 s32 (*reset_hw)(struct e1000_hw *); 757 s32 (*reset_hw)(struct e1000_hw *);
754 s32 (*init_hw)(struct e1000_hw *); 758 s32 (*init_hw)(struct e1000_hw *);
755 s32 (*setup_link)(struct e1000_hw *); 759 s32 (*setup_link)(struct e1000_hw *);
756 s32 (*setup_physical_interface)(struct e1000_hw *); 760 s32 (*setup_physical_interface)(struct e1000_hw *);
757 s32 (*setup_led)(struct e1000_hw *); 761 s32 (*setup_led)(struct e1000_hw *);
758 void (*write_vfta)(struct e1000_hw *, u32, u32); 762 void (*write_vfta)(struct e1000_hw *, u32, u32);
763 s32 (*read_mac_addr)(struct e1000_hw *);
759}; 764};
760 765
761/* Function pointers for the PHY. */ 766/* Function pointers for the PHY. */
@@ -814,6 +819,10 @@ struct e1000_mac_info {
814 u16 ifs_ratio; 819 u16 ifs_ratio;
815 u16 ifs_step_size; 820 u16 ifs_step_size;
816 u16 mta_reg_count; 821 u16 mta_reg_count;
822
823 /* Maximum size of the MTA register table in all supported adapters */
824 #define MAX_MTA_REG 128
825 u32 mta_shadow[MAX_MTA_REG];
817 u16 rar_entry_count; 826 u16 rar_entry_count;
818 827
819 u8 forced_speed_duplex; 828 u8 forced_speed_duplex;
@@ -897,7 +906,6 @@ struct e1000_fc_info {
897 906
898struct e1000_dev_spec_82571 { 907struct e1000_dev_spec_82571 {
899 bool laa_is_present; 908 bool laa_is_present;
900 bool alt_mac_addr_is_present;
901 u32 smb_counter; 909 u32 smb_counter;
902}; 910};
903 911
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 8b6ecd127889..54d03a0ce3ce 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -3368,6 +3368,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
3368 /* cleanup_led dependent on mac type */ 3368 /* cleanup_led dependent on mac type */
3369 .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, 3369 .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
3370 .get_bus_info = e1000_get_bus_info_ich8lan, 3370 .get_bus_info = e1000_get_bus_info_ich8lan,
3371 .set_lan_id = e1000_set_lan_id_single_port,
3371 .get_link_up_info = e1000_get_link_up_info_ich8lan, 3372 .get_link_up_info = e1000_get_link_up_info_ich8lan,
3372 /* led_on dependent on mac type */ 3373 /* led_on dependent on mac type */
3373 /* led_off dependent on mac type */ 3374 /* led_off dependent on mac type */
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 2fa9b36a2c5a..2425ed11d5cc 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -51,10 +51,10 @@ enum e1000_mng_mode {
51 **/ 51 **/
52s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) 52s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
53{ 53{
54 struct e1000_mac_info *mac = &hw->mac;
54 struct e1000_bus_info *bus = &hw->bus; 55 struct e1000_bus_info *bus = &hw->bus;
55 struct e1000_adapter *adapter = hw->adapter; 56 struct e1000_adapter *adapter = hw->adapter;
56 u32 status; 57 u16 pcie_link_status, cap_offset;
57 u16 pcie_link_status, pci_header_type, cap_offset;
58 58
59 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); 59 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
60 if (!cap_offset) { 60 if (!cap_offset) {
@@ -68,20 +68,46 @@ s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
68 PCIE_LINK_WIDTH_SHIFT); 68 PCIE_LINK_WIDTH_SHIFT);
69 } 69 }
70 70
71 pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER, 71 mac->ops.set_lan_id(hw);
72 &pci_header_type);
73 if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
74 status = er32(STATUS);
75 bus->func = (status & E1000_STATUS_FUNC_MASK)
76 >> E1000_STATUS_FUNC_SHIFT;
77 } else {
78 bus->func = 0;
79 }
80 72
81 return 0; 73 return 0;
82} 74}
83 75
84/** 76/**
77 * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
78 *
79 * @hw: pointer to the HW structure
80 *
81 * Determines the LAN function id by reading memory-mapped registers
82 * and swaps the port value if requested.
83 **/
84void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
85{
86 struct e1000_bus_info *bus = &hw->bus;
87 u32 reg;
88
89 /*
90 * The status register reports the correct function number
91 * for the device regardless of function swap state.
92 */
93 reg = er32(STATUS);
94 bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
95}
96
97/**
98 * e1000_set_lan_id_single_port - Set LAN id for a single port device
99 * @hw: pointer to the HW structure
100 *
101 * Sets the LAN function id to zero for a single port device.
102 **/
103void e1000_set_lan_id_single_port(struct e1000_hw *hw)
104{
105 struct e1000_bus_info *bus = &hw->bus;
106
107 bus->func = 0;
108}
109
110/**
85 * e1000_clear_vfta_generic - Clear VLAN filter table 111 * e1000_clear_vfta_generic - Clear VLAN filter table
86 * @hw: pointer to the HW structure 112 * @hw: pointer to the HW structure
87 * 113 *
@@ -139,6 +165,68 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
139} 165}
140 166
141/** 167/**
168 * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
169 * @hw: pointer to the HW structure
170 *
171 * Checks the nvm for an alternate MAC address. An alternate MAC address
172 * can be setup by pre-boot software and must be treated like a permanent
173 * address and must override the actual permanent MAC address. If an
174 * alternate MAC address is found it is programmed into RAR0, replacing
175 * the permanent address that was installed into RAR0 by the Si on reset.
176 * This function will return SUCCESS unless it encounters an error while
177 * reading the EEPROM.
178 **/
179s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
180{
181 u32 i;
182 s32 ret_val = 0;
183 u16 offset, nvm_alt_mac_addr_offset, nvm_data;
184 u8 alt_mac_addr[ETH_ALEN];
185
186 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
187 &nvm_alt_mac_addr_offset);
188 if (ret_val) {
189 e_dbg("NVM Read Error\n");
190 goto out;
191 }
192
193 if (nvm_alt_mac_addr_offset == 0xFFFF) {
194 /* There is no Alternate MAC Address */
195 goto out;
196 }
197
198 if (hw->bus.func == E1000_FUNC_1)
199 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
200 for (i = 0; i < ETH_ALEN; i += 2) {
201 offset = nvm_alt_mac_addr_offset + (i >> 1);
202 ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
203 if (ret_val) {
204 e_dbg("NVM Read Error\n");
205 goto out;
206 }
207
208 alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
209 alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
210 }
211
212 /* if multicast bit is set, the alternate address will not be used */
213 if (alt_mac_addr[0] & 0x01) {
214 e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
215 goto out;
216 }
217
218 /*
219 * We have a valid alternate MAC address, and we want to treat it the
220 * same as the normal permanent MAC address stored by the HW into the
221 * RAR. Do this by mapping this address into RAR0.
222 */
223 e1000e_rar_set(hw, alt_mac_addr, 0);
224
225out:
226 return ret_val;
227}
228
229/**
142 * e1000e_rar_set - Set receive address register 230 * e1000e_rar_set - Set receive address register
143 * @hw: pointer to the HW structure 231 * @hw: pointer to the HW structure
144 * @addr: pointer to the receive address 232 * @addr: pointer to the receive address
@@ -252,62 +340,34 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
252 * @hw: pointer to the HW structure 340 * @hw: pointer to the HW structure
253 * @mc_addr_list: array of multicast addresses to program 341 * @mc_addr_list: array of multicast addresses to program
254 * @mc_addr_count: number of multicast addresses to program 342 * @mc_addr_count: number of multicast addresses to program
255 * @rar_used_count: the first RAR register free to program
256 * @rar_count: total number of supported Receive Address Registers
257 * 343 *
258 * Updates the Receive Address Registers and Multicast Table Array. 344 * Updates entire Multicast Table Array.
259 * The caller must have a packed mc_addr_list of multicast addresses. 345 * The caller must have a packed mc_addr_list of multicast addresses.
260 * The parameter rar_count will usually be hw->mac.rar_entry_count
261 * unless there are workarounds that change this.
262 **/ 346 **/
263void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, 347void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
264 u8 *mc_addr_list, u32 mc_addr_count, 348 u8 *mc_addr_list, u32 mc_addr_count)
265 u32 rar_used_count, u32 rar_count)
266{ 349{
267 u32 i; 350 u32 hash_value, hash_bit, hash_reg;
268 u32 *mcarray = kzalloc(hw->mac.mta_reg_count * sizeof(u32), GFP_ATOMIC); 351 int i;
269 352
270 if (!mcarray) { 353 /* clear mta_shadow */
271 printk(KERN_ERR "multicast array memory allocation failed\n"); 354 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
272 return;
273 }
274 355
275 /* 356 /* update mta_shadow from mc_addr_list */
276 * Load the first set of multicast addresses into the exact 357 for (i = 0; (u32) i < mc_addr_count; i++) {
277 * filters (RAR). If there are not enough to fill the RAR
278 * array, clear the filters.
279 */
280 for (i = rar_used_count; i < rar_count; i++) {
281 if (mc_addr_count) {
282 e1000e_rar_set(hw, mc_addr_list, i);
283 mc_addr_count--;
284 mc_addr_list += ETH_ALEN;
285 } else {
286 E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
287 e1e_flush();
288 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
289 e1e_flush();
290 }
291 }
292
293 /* Load any remaining multicast addresses into the hash table. */
294 for (; mc_addr_count > 0; mc_addr_count--) {
295 u32 hash_value, hash_reg, hash_bit, mta;
296 hash_value = e1000_hash_mc_addr(hw, mc_addr_list); 358 hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
297 e_dbg("Hash value = 0x%03X\n", hash_value); 359
298 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); 360 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
299 hash_bit = hash_value & 0x1F; 361 hash_bit = hash_value & 0x1F;
300 mta = (1 << hash_bit);
301 mcarray[hash_reg] |= mta;
302 mc_addr_list += ETH_ALEN;
303 }
304 362
305 /* write the hash table completely */ 363 hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
306 for (i = 0; i < hw->mac.mta_reg_count; i++) 364 mc_addr_list += (ETH_ALEN);
307 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, mcarray[i]); 365 }
308 366
367 /* replace the entire MTA table */
368 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
369 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
309 e1e_flush(); 370 e1e_flush();
310 kfree(mcarray);
311} 371}
312 372
313/** 373/**
@@ -2072,67 +2132,27 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
2072} 2132}
2073 2133
2074/** 2134/**
2075 * e1000e_read_mac_addr - Read device MAC address 2135 * e1000_read_mac_addr_generic - Read device MAC address
2076 * @hw: pointer to the HW structure 2136 * @hw: pointer to the HW structure
2077 * 2137 *
2078 * Reads the device MAC address from the EEPROM and stores the value. 2138 * Reads the device MAC address from the EEPROM and stores the value.
2079 * Since devices with two ports use the same EEPROM, we increment the 2139 * Since devices with two ports use the same EEPROM, we increment the
2080 * last bit in the MAC address for the second port. 2140 * last bit in the MAC address for the second port.
2081 **/ 2141 **/
2082s32 e1000e_read_mac_addr(struct e1000_hw *hw) 2142s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
2083{ 2143{
2084 s32 ret_val; 2144 u32 rar_high;
2085 u16 offset, nvm_data, i; 2145 u32 rar_low;
2086 u16 mac_addr_offset = 0; 2146 u16 i;
2087
2088 if (hw->mac.type == e1000_82571) {
2089 /* Check for an alternate MAC address. An alternate MAC
2090 * address can be setup by pre-boot software and must be
2091 * treated like a permanent address and must override the
2092 * actual permanent MAC address.*/
2093 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
2094 &mac_addr_offset);
2095 if (ret_val) {
2096 e_dbg("NVM Read Error\n");
2097 return ret_val;
2098 }
2099 if (mac_addr_offset == 0xFFFF)
2100 mac_addr_offset = 0;
2101
2102 if (mac_addr_offset) {
2103 if (hw->bus.func == E1000_FUNC_1)
2104 mac_addr_offset += ETH_ALEN/sizeof(u16);
2105
2106 /* make sure we have a valid mac address here
2107 * before using it */
2108 ret_val = e1000_read_nvm(hw, mac_addr_offset, 1,
2109 &nvm_data);
2110 if (ret_val) {
2111 e_dbg("NVM Read Error\n");
2112 return ret_val;
2113 }
2114 if (nvm_data & 0x0001)
2115 mac_addr_offset = 0;
2116 }
2117 2147
2118 if (mac_addr_offset) 2148 rar_high = er32(RAH(0));
2119 hw->dev_spec.e82571.alt_mac_addr_is_present = 1; 2149 rar_low = er32(RAL(0));
2120 }
2121 2150
2122 for (i = 0; i < ETH_ALEN; i += 2) { 2151 for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
2123 offset = mac_addr_offset + (i >> 1); 2152 hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
2124 ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
2125 if (ret_val) {
2126 e_dbg("NVM Read Error\n");
2127 return ret_val;
2128 }
2129 hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
2130 hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
2131 }
2132 2153
2133 /* Flip last bit of mac address if we're on second port */ 2154 for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
2134 if (!mac_addr_offset && hw->bus.func == E1000_FUNC_1) 2155 hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
2135 hw->mac.perm_addr[5] ^= 1;
2136 2156
2137 for (i = 0; i < ETH_ALEN; i++) 2157 for (i = 0; i < ETH_ALEN; i++)
2138 hw->mac.addr[i] = hw->mac.perm_addr[i]; 2158 hw->mac.addr[i] = hw->mac.perm_addr[i];
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 57f149b75fbe..88d54d3efcef 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2541,22 +2541,14 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2541 * @hw: pointer to the HW structure 2541 * @hw: pointer to the HW structure
2542 * @mc_addr_list: array of multicast addresses to program 2542 * @mc_addr_list: array of multicast addresses to program
2543 * @mc_addr_count: number of multicast addresses to program 2543 * @mc_addr_count: number of multicast addresses to program
2544 * @rar_used_count: the first RAR register free to program
2545 * @rar_count: total number of supported Receive Address Registers
2546 * 2544 *
2547 * Updates the Receive Address Registers and Multicast Table Array. 2545 * Updates the Multicast Table Array.
2548 * The caller must have a packed mc_addr_list of multicast addresses. 2546 * The caller must have a packed mc_addr_list of multicast addresses.
2549 * The parameter rar_count will usually be hw->mac.rar_entry_count
2550 * unless there are workarounds that change this. Currently no func pointer
2551 * exists and all implementations are handled in the generic version of this
2552 * function.
2553 **/ 2547 **/
2554static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, 2548static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
2555 u32 mc_addr_count, u32 rar_used_count, 2549 u32 mc_addr_count)
2556 u32 rar_count)
2557{ 2550{
2558 hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count, 2551 hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
2559 rar_used_count, rar_count);
2560} 2552}
2561 2553
2562/** 2554/**
@@ -2572,7 +2564,6 @@ static void e1000_set_multi(struct net_device *netdev)
2572{ 2564{
2573 struct e1000_adapter *adapter = netdev_priv(netdev); 2565 struct e1000_adapter *adapter = netdev_priv(netdev);
2574 struct e1000_hw *hw = &adapter->hw; 2566 struct e1000_hw *hw = &adapter->hw;
2575 struct e1000_mac_info *mac = &hw->mac;
2576 struct dev_mc_list *mc_ptr; 2567 struct dev_mc_list *mc_ptr;
2577 u8 *mta_list; 2568 u8 *mta_list;
2578 u32 rctl; 2569 u32 rctl;
@@ -2598,31 +2589,25 @@ static void e1000_set_multi(struct net_device *netdev)
2598 2589
2599 ew32(RCTL, rctl); 2590 ew32(RCTL, rctl);
2600 2591
2601 if (netdev->mc_count) { 2592 if (!netdev_mc_empty(netdev)) {
2602 mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC); 2593 mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
2603 if (!mta_list) 2594 if (!mta_list)
2604 return; 2595 return;
2605 2596
2606 /* prepare a packed array of only addresses. */ 2597 /* prepare a packed array of only addresses. */
2607 mc_ptr = netdev->mc_list; 2598 i = 0;
2608 2599 netdev_for_each_mc_addr(mc_ptr, netdev)
2609 for (i = 0; i < netdev->mc_count; i++) { 2600 memcpy(mta_list + (i++ * ETH_ALEN),
2610 if (!mc_ptr) 2601 mc_ptr->dmi_addr, ETH_ALEN);
2611 break;
2612 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
2613 ETH_ALEN);
2614 mc_ptr = mc_ptr->next;
2615 }
2616 2602
2617 e1000_update_mc_addr_list(hw, mta_list, i, 1, 2603 e1000_update_mc_addr_list(hw, mta_list, i);
2618 mac->rar_entry_count);
2619 kfree(mta_list); 2604 kfree(mta_list);
2620 } else { 2605 } else {
2621 /* 2606 /*
2622 * if we're called from probe, we might not have 2607 * if we're called from probe, we might not have
2623 * anything to do here, so clear out the list 2608 * anything to do here, so clear out the list
2624 */ 2609 */
2625 e1000_update_mc_addr_list(hw, NULL, 0, 1, mac->rar_entry_count); 2610 e1000_update_mc_addr_list(hw, NULL, 0);
2626 } 2611 }
2627} 2612}
2628 2613
@@ -3482,7 +3467,7 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
3482 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" ))); 3467 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
3483} 3468}
3484 3469
3485bool e1000_has_link(struct e1000_adapter *adapter) 3470bool e1000e_has_link(struct e1000_adapter *adapter)
3486{ 3471{
3487 struct e1000_hw *hw = &adapter->hw; 3472 struct e1000_hw *hw = &adapter->hw;
3488 bool link_active = 0; 3473 bool link_active = 0;
@@ -3563,7 +3548,7 @@ static void e1000_watchdog_task(struct work_struct *work)
3563 u32 link, tctl; 3548 u32 link, tctl;
3564 int tx_pending = 0; 3549 int tx_pending = 0;
3565 3550
3566 link = e1000_has_link(adapter); 3551 link = e1000e_has_link(adapter);
3567 if ((netif_carrier_ok(netdev)) && link) { 3552 if ((netif_carrier_ok(netdev)) && link) {
3568 e1000e_enable_receives(adapter); 3553 e1000e_enable_receives(adapter);
3569 goto link_up; 3554 goto link_up;
@@ -5134,7 +5119,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5134 5119
5135 e1000_eeprom_checks(adapter); 5120 e1000_eeprom_checks(adapter);
5136 5121
5137 /* copy the MAC address out of the NVM */ 5122 /* copy the MAC address */
5138 if (e1000e_read_mac_addr(&adapter->hw)) 5123 if (e1000e_read_mac_addr(&adapter->hw))
5139 e_err("NVM Read Error while reading MAC address\n"); 5124 e_err("NVM Read Error while reading MAC address\n");
5140 5125
@@ -5326,7 +5311,7 @@ static struct pci_error_handlers e1000_err_handler = {
5326 .resume = e1000_io_resume, 5311 .resume = e1000_io_resume,
5327}; 5312};
5328 5313
5329static struct pci_device_id e1000_pci_tbl[] = { 5314static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
5330 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, 5315 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
5331 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, 5316 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
5332 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, 5317 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 94c59498cdb6..1b05bdf62c3c 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1287,9 +1287,10 @@ set_multicast_list(struct net_device *dev)
1287 struct eepro_local *lp = netdev_priv(dev); 1287 struct eepro_local *lp = netdev_priv(dev);
1288 short ioaddr = dev->base_addr; 1288 short ioaddr = dev->base_addr;
1289 unsigned short mode; 1289 unsigned short mode;
1290 struct dev_mc_list *dmi=dev->mc_list; 1290 struct dev_mc_list *dmi;
1291 int mc_count = netdev_mc_count(dev);
1291 1292
1292 if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63) 1293 if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63)
1293 { 1294 {
1294 eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ 1295 eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
1295 mode = inb(ioaddr + REG2); 1296 mode = inb(ioaddr + REG2);
@@ -1299,7 +1300,7 @@ set_multicast_list(struct net_device *dev)
1299 eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */ 1300 eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
1300 } 1301 }
1301 1302
1302 else if (dev->mc_count==0 ) 1303 else if (mc_count == 0)
1303 { 1304 {
1304 eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ 1305 eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
1305 mode = inb(ioaddr + REG2); 1306 mode = inb(ioaddr + REG2);
@@ -1329,12 +1330,10 @@ set_multicast_list(struct net_device *dev)
1329 outw(MC_SETUP, ioaddr + IO_PORT); 1330 outw(MC_SETUP, ioaddr + IO_PORT);
1330 outw(0, ioaddr + IO_PORT); 1331 outw(0, ioaddr + IO_PORT);
1331 outw(0, ioaddr + IO_PORT); 1332 outw(0, ioaddr + IO_PORT);
1332 outw(6*(dev->mc_count + 1), ioaddr + IO_PORT); 1333 outw(6 * (mc_count + 1), ioaddr + IO_PORT);
1333 1334
1334 for (i = 0; i < dev->mc_count; i++) 1335 netdev_for_each_mc_addr(dmi, dev) {
1335 { 1336 eaddrs = (unsigned short *) dmi->dmi_addr;
1336 eaddrs=(unsigned short *)dmi->dmi_addr;
1337 dmi=dmi->next;
1338 outw(*eaddrs++, ioaddr + IO_PORT); 1337 outw(*eaddrs++, ioaddr + IO_PORT);
1339 outw(*eaddrs++, ioaddr + IO_PORT); 1338 outw(*eaddrs++, ioaddr + IO_PORT);
1340 outw(*eaddrs++, ioaddr + IO_PORT); 1339 outw(*eaddrs++, ioaddr + IO_PORT);
@@ -1348,7 +1347,7 @@ set_multicast_list(struct net_device *dev)
1348 outb(MC_SETUP, ioaddr); 1347 outb(MC_SETUP, ioaddr);
1349 1348
1350 /* Update the transmit queue */ 1349 /* Update the transmit queue */
1351 i = lp->tx_end + XMT_HEADER + 6*(dev->mc_count + 1); 1350 i = lp->tx_end + XMT_HEADER + 6 * (mc_count + 1);
1352 1351
1353 if (lp->tx_start != lp->tx_end) 1352 if (lp->tx_start != lp->tx_end)
1354 { 1353 {
@@ -1380,8 +1379,8 @@ set_multicast_list(struct net_device *dev)
1380 break; 1379 break;
1381 } else if ((i & 0x0f) == 0x03) { /* MC-Done */ 1380 } else if ((i & 0x0f) == 0x03) { /* MC-Done */
1382 printk(KERN_DEBUG "%s: set Rx mode to %d address%s.\n", 1381 printk(KERN_DEBUG "%s: set Rx mode to %d address%s.\n",
1383 dev->name, dev->mc_count, 1382 dev->name, mc_count,
1384 dev->mc_count > 1 ? "es":""); 1383 mc_count > 1 ? "es":"");
1385 break; 1384 break;
1386 } 1385 }
1387 } 1386 }
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 6fbfc8eee632..7013dc8a6cbc 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -1578,7 +1578,7 @@ static void eexp_setup_filter(struct net_device *dev)
1578{ 1578{
1579 struct dev_mc_list *dmi; 1579 struct dev_mc_list *dmi;
1580 unsigned short ioaddr = dev->base_addr; 1580 unsigned short ioaddr = dev->base_addr;
1581 int count = dev->mc_count; 1581 int count = netdev_mc_count(dev);
1582 int i; 1582 int i;
1583 if (count > 8) { 1583 if (count > 8) {
1584 printk(KERN_INFO "%s: too many multicast addresses (%d)\n", 1584 printk(KERN_INFO "%s: too many multicast addresses (%d)\n",
@@ -1588,23 +1588,19 @@ static void eexp_setup_filter(struct net_device *dev)
1588 1588
1589 outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR); 1589 outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR);
1590 outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST)); 1590 outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST));
1591 for (i = 0, dmi = dev->mc_list; i < count; i++, dmi = dmi->next) { 1591 i = 0;
1592 unsigned short *data; 1592 netdev_for_each_mc_addr(dmi, dev) {
1593 if (!dmi) { 1593 unsigned short *data = (unsigned short *) dmi->dmi_addr;
1594 printk(KERN_INFO "%s: too few multicast addresses\n", dev->name); 1594
1595 if (i == count)
1595 break; 1596 break;
1596 }
1597 if (dmi->dmi_addrlen != ETH_ALEN) {
1598 printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name);
1599 continue;
1600 }
1601 data = (unsigned short *)dmi->dmi_addr;
1602 outw((CONF_MULTICAST+(6*i)) & ~31, ioaddr+SM_PTR); 1597 outw((CONF_MULTICAST+(6*i)) & ~31, ioaddr+SM_PTR);
1603 outw(data[0], ioaddr+SHADOW(CONF_MULTICAST+(6*i))); 1598 outw(data[0], ioaddr+SHADOW(CONF_MULTICAST+(6*i)));
1604 outw((CONF_MULTICAST+(6*i)+2) & ~31, ioaddr+SM_PTR); 1599 outw((CONF_MULTICAST+(6*i)+2) & ~31, ioaddr+SM_PTR);
1605 outw(data[1], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+2)); 1600 outw(data[1], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+2));
1606 outw((CONF_MULTICAST+(6*i)+4) & ~31, ioaddr+SM_PTR); 1601 outw((CONF_MULTICAST+(6*i)+4) & ~31, ioaddr+SM_PTR);
1607 outw(data[2], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+4)); 1602 outw(data[2], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+4));
1603 i++;
1608 } 1604 }
1609} 1605}
1610 1606
@@ -1627,9 +1623,9 @@ eexp_set_multicast(struct net_device *dev)
1627 } 1623 }
1628 if (!(dev->flags & IFF_PROMISC)) { 1624 if (!(dev->flags & IFF_PROMISC)) {
1629 eexp_setup_filter(dev); 1625 eexp_setup_filter(dev);
1630 if (lp->old_mc_count != dev->mc_count) { 1626 if (lp->old_mc_count != netdev_mc_count(dev)) {
1631 kick = 1; 1627 kick = 1;
1632 lp->old_mc_count = dev->mc_count; 1628 lp->old_mc_count = netdev_mc_count(dev);
1633 } 1629 }
1634 } 1630 }
1635 if (kick) { 1631 if (kick) {
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 7b62336e6736..b004eaba3d7b 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -1967,7 +1967,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
1967{ 1967{
1968 struct ehea_port *port = netdev_priv(dev); 1968 struct ehea_port *port = netdev_priv(dev);
1969 struct dev_mc_list *k_mcl_entry; 1969 struct dev_mc_list *k_mcl_entry;
1970 int ret, i; 1970 int ret;
1971 1971
1972 if (dev->flags & IFF_PROMISC) { 1972 if (dev->flags & IFF_PROMISC) {
1973 ehea_promiscuous(dev, 1); 1973 ehea_promiscuous(dev, 1);
@@ -1981,7 +1981,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
1981 } 1981 }
1982 ehea_allmulti(dev, 0); 1982 ehea_allmulti(dev, 0);
1983 1983
1984 if (dev->mc_count) { 1984 if (!netdev_mc_empty(dev)) {
1985 ret = ehea_drop_multicast_list(dev); 1985 ret = ehea_drop_multicast_list(dev);
1986 if (ret) { 1986 if (ret) {
1987 /* Dropping the current multicast list failed. 1987 /* Dropping the current multicast list failed.
@@ -1990,15 +1990,14 @@ static void ehea_set_multicast_list(struct net_device *dev)
1990 ehea_allmulti(dev, 1); 1990 ehea_allmulti(dev, 1);
1991 } 1991 }
1992 1992
1993 if (dev->mc_count > port->adapter->max_mc_mac) { 1993 if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
1994 ehea_info("Mcast registration limit reached (0x%llx). " 1994 ehea_info("Mcast registration limit reached (0x%llx). "
1995 "Use ALLMULTI!", 1995 "Use ALLMULTI!",
1996 port->adapter->max_mc_mac); 1996 port->adapter->max_mc_mac);
1997 goto out; 1997 goto out;
1998 } 1998 }
1999 1999
2000 for (i = 0, k_mcl_entry = dev->mc_list; i < dev->mc_count; i++, 2000 netdev_for_each_mc_addr(k_mcl_entry, dev)
2001 k_mcl_entry = k_mcl_entry->next)
2002 ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr); 2001 ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
2003 2002
2004 } 2003 }
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index 66813c91a720..3ee32e58c7ec 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -1413,7 +1413,7 @@ static void enc28j60_set_multicast_list(struct net_device *dev)
1413 if (netif_msg_link(priv)) 1413 if (netif_msg_link(priv))
1414 dev_info(&dev->dev, "promiscuous mode\n"); 1414 dev_info(&dev->dev, "promiscuous mode\n");
1415 priv->rxfilter = RXFILTER_PROMISC; 1415 priv->rxfilter = RXFILTER_PROMISC;
1416 } else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count) { 1416 } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
1417 if (netif_msg_link(priv)) 1417 if (netif_msg_link(priv))
1418 dev_info(&dev->dev, "%smulticast mode\n", 1418 dev_info(&dev->dev, "%smulticast mode\n",
1419 (dev->flags & IFF_ALLMULTI) ? "all-" : ""); 1419 (dev->flags & IFF_ALLMULTI) ? "all-" : "");
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index e1c2076228ba..ee01f5a6d0d4 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -34,7 +34,7 @@
34 34
35#define DRV_NAME "enic" 35#define DRV_NAME "enic"
36#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver" 36#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver"
37#define DRV_VERSION "1.1.0.100" 37#define DRV_VERSION "1.1.0.241a"
38#define DRV_COPYRIGHT "Copyright 2008-2009 Cisco Systems, Inc" 38#define DRV_COPYRIGHT "Copyright 2008-2009 Cisco Systems, Inc"
39#define PFX DRV_NAME ": " 39#define PFX DRV_NAME ": "
40 40
@@ -89,9 +89,12 @@ struct enic {
89 spinlock_t devcmd_lock; 89 spinlock_t devcmd_lock;
90 u8 mac_addr[ETH_ALEN]; 90 u8 mac_addr[ETH_ALEN];
91 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN]; 91 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
92 unsigned int flags;
92 unsigned int mc_count; 93 unsigned int mc_count;
93 int csum_rx_enabled; 94 int csum_rx_enabled;
94 u32 port_mtu; 95 u32 port_mtu;
96 u32 rx_coalesce_usecs;
97 u32 tx_coalesce_usecs;
95 98
96 /* work queue cache line section */ 99 /* work queue cache line section */
97 ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX]; 100 ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index f875751af15e..cf098bb636b8 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -51,7 +51,7 @@
51#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */ 51#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
52 52
53/* Supported devices */ 53/* Supported devices */
54static struct pci_device_id enic_id_table[] = { 54static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
55 { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) }, 55 { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
56 { 0, } /* end of table */ 56 { 0, } /* end of table */
57}; 57};
@@ -261,6 +261,62 @@ static void enic_set_msglevel(struct net_device *netdev, u32 value)
261 enic->msg_enable = value; 261 enic->msg_enable = value;
262} 262}
263 263
264static int enic_get_coalesce(struct net_device *netdev,
265 struct ethtool_coalesce *ecmd)
266{
267 struct enic *enic = netdev_priv(netdev);
268
269 ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
270 ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
271
272 return 0;
273}
274
275static int enic_set_coalesce(struct net_device *netdev,
276 struct ethtool_coalesce *ecmd)
277{
278 struct enic *enic = netdev_priv(netdev);
279 u32 tx_coalesce_usecs;
280 u32 rx_coalesce_usecs;
281
282 tx_coalesce_usecs = min_t(u32,
283 INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
284 ecmd->tx_coalesce_usecs);
285 rx_coalesce_usecs = min_t(u32,
286 INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
287 ecmd->rx_coalesce_usecs);
288
289 switch (vnic_dev_get_intr_mode(enic->vdev)) {
290 case VNIC_DEV_INTR_MODE_INTX:
291 if (tx_coalesce_usecs != rx_coalesce_usecs)
292 return -EINVAL;
293
294 vnic_intr_coalescing_timer_set(&enic->intr[ENIC_INTX_WQ_RQ],
295 INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
296 break;
297 case VNIC_DEV_INTR_MODE_MSI:
298 if (tx_coalesce_usecs != rx_coalesce_usecs)
299 return -EINVAL;
300
301 vnic_intr_coalescing_timer_set(&enic->intr[0],
302 INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
303 break;
304 case VNIC_DEV_INTR_MODE_MSIX:
305 vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_WQ],
306 INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
307 vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_RQ],
308 INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs));
309 break;
310 default:
311 break;
312 }
313
314 enic->tx_coalesce_usecs = tx_coalesce_usecs;
315 enic->rx_coalesce_usecs = rx_coalesce_usecs;
316
317 return 0;
318}
319
264static const struct ethtool_ops enic_ethtool_ops = { 320static const struct ethtool_ops enic_ethtool_ops = {
265 .get_settings = enic_get_settings, 321 .get_settings = enic_get_settings,
266 .get_drvinfo = enic_get_drvinfo, 322 .get_drvinfo = enic_get_drvinfo,
@@ -278,6 +334,8 @@ static const struct ethtool_ops enic_ethtool_ops = {
278 .set_sg = ethtool_op_set_sg, 334 .set_sg = ethtool_op_set_sg,
279 .get_tso = ethtool_op_get_tso, 335 .get_tso = ethtool_op_get_tso,
280 .set_tso = enic_set_tso, 336 .set_tso = enic_set_tso,
337 .get_coalesce = enic_get_coalesce,
338 .set_coalesce = enic_set_coalesce,
281 .get_flags = ethtool_op_get_flags, 339 .get_flags = ethtool_op_get_flags,
282 .set_flags = ethtool_op_set_flags, 340 .set_flags = ethtool_op_set_flags,
283}; 341};
@@ -363,12 +421,12 @@ static void enic_mtu_check(struct enic *enic)
363 u32 mtu = vnic_dev_mtu(enic->vdev); 421 u32 mtu = vnic_dev_mtu(enic->vdev);
364 422
365 if (mtu && mtu != enic->port_mtu) { 423 if (mtu && mtu != enic->port_mtu) {
424 enic->port_mtu = mtu;
366 if (mtu < enic->netdev->mtu) 425 if (mtu < enic->netdev->mtu)
367 printk(KERN_WARNING PFX 426 printk(KERN_WARNING PFX
368 "%s: interface MTU (%d) set higher " 427 "%s: interface MTU (%d) set higher "
369 "than switch port MTU (%d)\n", 428 "than switch port MTU (%d)\n",
370 enic->netdev->name, enic->netdev->mtu, mtu); 429 enic->netdev->name, enic->netdev->mtu, mtu);
371 enic->port_mtu = mtu;
372 } 430 }
373} 431}
374 432
@@ -673,7 +731,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
673 731
674/* netif_tx_lock held, process context with BHs disabled, or BH */ 732/* netif_tx_lock held, process context with BHs disabled, or BH */
675static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, 733static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
676 struct net_device *netdev) 734 struct net_device *netdev)
677{ 735{
678 struct enic *enic = netdev_priv(netdev); 736 struct enic *enic = netdev_priv(netdev);
679 struct vnic_wq *wq = &enic->wq[0]; 737 struct vnic_wq *wq = &enic->wq[0];
@@ -764,15 +822,16 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
764static void enic_set_multicast_list(struct net_device *netdev) 822static void enic_set_multicast_list(struct net_device *netdev)
765{ 823{
766 struct enic *enic = netdev_priv(netdev); 824 struct enic *enic = netdev_priv(netdev);
767 struct dev_mc_list *list = netdev->mc_list; 825 struct dev_mc_list *list;
768 int directed = 1; 826 int directed = 1;
769 int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0; 827 int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
770 int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0; 828 int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
771 int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0; 829 int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
830 unsigned int mc_count = netdev_mc_count(netdev);
772 int allmulti = (netdev->flags & IFF_ALLMULTI) || 831 int allmulti = (netdev->flags & IFF_ALLMULTI) ||
773 (netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS); 832 mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
833 unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
774 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN]; 834 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
775 unsigned int mc_count = netdev->mc_count;
776 unsigned int i, j; 835 unsigned int i, j;
777 836
778 if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) 837 if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
@@ -780,8 +839,11 @@ static void enic_set_multicast_list(struct net_device *netdev)
780 839
781 spin_lock(&enic->devcmd_lock); 840 spin_lock(&enic->devcmd_lock);
782 841
783 vnic_dev_packet_filter(enic->vdev, directed, 842 if (enic->flags != flags) {
784 multicast, broadcast, promisc, allmulti); 843 enic->flags = flags;
844 vnic_dev_packet_filter(enic->vdev, directed,
845 multicast, broadcast, promisc, allmulti);
846 }
785 847
786 /* Is there an easier way? Trying to minimize to 848 /* Is there an easier way? Trying to minimize to
787 * calls to add/del multicast addrs. We keep the 849 * calls to add/del multicast addrs. We keep the
@@ -789,9 +851,11 @@ static void enic_set_multicast_list(struct net_device *netdev)
789 * look for changes to add/del. 851 * look for changes to add/del.
790 */ 852 */
791 853
792 for (i = 0; list && i < mc_count; i++) { 854 i = 0;
793 memcpy(mc_addr[i], list->dmi_addr, ETH_ALEN); 855 netdev_for_each_mc_addr(list, netdev) {
794 list = list->next; 856 if (i == mc_count)
857 break;
858 memcpy(mc_addr[i++], list->dmi_addr, ETH_ALEN);
795 } 859 }
796 860
797 for (i = 0; i < enic->mc_count; i++) { 861 for (i = 0; i < enic->mc_count; i++) {
@@ -1084,34 +1148,6 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
1084 return 0; 1148 return 0;
1085} 1149}
1086 1150
1087static void enic_rq_drop_buf(struct vnic_rq *rq,
1088 struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
1089 int skipped, void *opaque)
1090{
1091 struct enic *enic = vnic_dev_priv(rq->vdev);
1092 struct sk_buff *skb = buf->os_buf;
1093
1094 if (skipped)
1095 return;
1096
1097 pci_unmap_single(enic->pdev, buf->dma_addr,
1098 buf->len, PCI_DMA_FROMDEVICE);
1099
1100 dev_kfree_skb_any(skb);
1101}
1102
1103static int enic_rq_service_drop(struct vnic_dev *vdev, struct cq_desc *cq_desc,
1104 u8 type, u16 q_number, u16 completed_index, void *opaque)
1105{
1106 struct enic *enic = vnic_dev_priv(vdev);
1107
1108 vnic_rq_service(&enic->rq[q_number], cq_desc,
1109 completed_index, VNIC_RQ_RETURN_DESC,
1110 enic_rq_drop_buf, opaque);
1111
1112 return 0;
1113}
1114
1115static int enic_poll(struct napi_struct *napi, int budget) 1151static int enic_poll(struct napi_struct *napi, int budget)
1116{ 1152{
1117 struct enic *enic = container_of(napi, struct enic, napi); 1153 struct enic *enic = container_of(napi, struct enic, napi);
@@ -1119,6 +1155,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
1119 unsigned int rq_work_to_do = budget; 1155 unsigned int rq_work_to_do = budget;
1120 unsigned int wq_work_to_do = -1; /* no limit */ 1156 unsigned int wq_work_to_do = -1; /* no limit */
1121 unsigned int work_done, rq_work_done, wq_work_done; 1157 unsigned int work_done, rq_work_done, wq_work_done;
1158 int err;
1122 1159
1123 /* Service RQ (first) and WQ 1160 /* Service RQ (first) and WQ
1124 */ 1161 */
@@ -1142,16 +1179,19 @@ static int enic_poll(struct napi_struct *napi, int budget)
1142 0 /* don't unmask intr */, 1179 0 /* don't unmask intr */,
1143 0 /* don't reset intr timer */); 1180 0 /* don't reset intr timer */);
1144 1181
1145 if (rq_work_done > 0) { 1182 err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
1146 1183
1147 /* Replenish RQ 1184 /* Buffer allocation failed. Stay in polling
1148 */ 1185 * mode so we can try to fill the ring again.
1186 */
1149 1187
1150 vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf); 1188 if (err)
1189 rq_work_done = rq_work_to_do;
1151 1190
1152 } else { 1191 if (rq_work_done < rq_work_to_do) {
1153 1192
1154 /* If no work done, flush all LROs and exit polling 1193 /* Some work done, but not enough to stay in polling,
1194 * flush all LROs and exit polling
1155 */ 1195 */
1156 1196
1157 if (netdev->features & NETIF_F_LRO) 1197 if (netdev->features & NETIF_F_LRO)
@@ -1170,6 +1210,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1170 struct net_device *netdev = enic->netdev; 1210 struct net_device *netdev = enic->netdev;
1171 unsigned int work_to_do = budget; 1211 unsigned int work_to_do = budget;
1172 unsigned int work_done; 1212 unsigned int work_done;
1213 int err;
1173 1214
1174 /* Service RQ 1215 /* Service RQ
1175 */ 1216 */
@@ -1177,25 +1218,30 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1177 work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ], 1218 work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
1178 work_to_do, enic_rq_service, NULL); 1219 work_to_do, enic_rq_service, NULL);
1179 1220
1180 if (work_done > 0) { 1221 /* Return intr event credits for this polling
1181 1222 * cycle. An intr event is the completion of a
1182 /* Replenish RQ 1223 * RQ packet.
1183 */ 1224 */
1184
1185 vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
1186
1187 /* Return intr event credits for this polling
1188 * cycle. An intr event is the completion of a
1189 * RQ packet.
1190 */
1191 1225
1226 if (work_done > 0)
1192 vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ], 1227 vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
1193 work_done, 1228 work_done,
1194 0 /* don't unmask intr */, 1229 0 /* don't unmask intr */,
1195 0 /* don't reset intr timer */); 1230 0 /* don't reset intr timer */);
1196 } else {
1197 1231
1198 /* If no work done, flush all LROs and exit polling 1232 err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
1233
1234 /* Buffer allocation failed. Stay in polling mode
1235 * so we can try to fill the ring again.
1236 */
1237
1238 if (err)
1239 work_done = work_to_do;
1240
1241 if (work_done < work_to_do) {
1242
1243 /* Some work done, but not enough to stay in polling,
1244 * flush all LROs and exit polling
1199 */ 1245 */
1200 1246
1201 if (netdev->features & NETIF_F_LRO) 1247 if (netdev->features & NETIF_F_LRO)
@@ -1304,6 +1350,24 @@ static int enic_request_intr(struct enic *enic)
1304 return err; 1350 return err;
1305} 1351}
1306 1352
1353static void enic_synchronize_irqs(struct enic *enic)
1354{
1355 unsigned int i;
1356
1357 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1358 case VNIC_DEV_INTR_MODE_INTX:
1359 case VNIC_DEV_INTR_MODE_MSI:
1360 synchronize_irq(enic->pdev->irq);
1361 break;
1362 case VNIC_DEV_INTR_MODE_MSIX:
1363 for (i = 0; i < enic->intr_count; i++)
1364 synchronize_irq(enic->msix_entry[i].vector);
1365 break;
1366 default:
1367 break;
1368 }
1369}
1370
1307static int enic_notify_set(struct enic *enic) 1371static int enic_notify_set(struct enic *enic)
1308{ 1372{
1309 int err; 1373 int err;
@@ -1360,11 +1424,13 @@ static int enic_open(struct net_device *netdev)
1360 } 1424 }
1361 1425
1362 for (i = 0; i < enic->rq_count; i++) { 1426 for (i = 0; i < enic->rq_count; i++) {
1363 err = vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf); 1427 vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
1364 if (err) { 1428 /* Need at least one buffer on ring to get going */
1429 if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
1365 printk(KERN_ERR PFX 1430 printk(KERN_ERR PFX
1366 "%s: Unable to alloc receive buffers.\n", 1431 "%s: Unable to alloc receive buffers.\n",
1367 netdev->name); 1432 netdev->name);
1433 err = -ENOMEM;
1368 goto err_out_notify_unset; 1434 goto err_out_notify_unset;
1369 } 1435 }
1370 } 1436 }
@@ -1409,16 +1475,19 @@ static int enic_stop(struct net_device *netdev)
1409 unsigned int i; 1475 unsigned int i;
1410 int err; 1476 int err;
1411 1477
1478 for (i = 0; i < enic->intr_count; i++)
1479 vnic_intr_mask(&enic->intr[i]);
1480
1481 enic_synchronize_irqs(enic);
1482
1412 del_timer_sync(&enic->notify_timer); 1483 del_timer_sync(&enic->notify_timer);
1413 1484
1414 spin_lock(&enic->devcmd_lock); 1485 spin_lock(&enic->devcmd_lock);
1415 vnic_dev_disable(enic->vdev); 1486 vnic_dev_disable(enic->vdev);
1416 spin_unlock(&enic->devcmd_lock); 1487 spin_unlock(&enic->devcmd_lock);
1417 napi_disable(&enic->napi); 1488 napi_disable(&enic->napi);
1418 netif_stop_queue(netdev); 1489 netif_carrier_off(netdev);
1419 1490 netif_tx_disable(netdev);
1420 for (i = 0; i < enic->intr_count; i++)
1421 vnic_intr_mask(&enic->intr[i]);
1422 1491
1423 for (i = 0; i < enic->wq_count; i++) { 1492 for (i = 0; i < enic->wq_count; i++) {
1424 err = vnic_wq_disable(&enic->wq[i]); 1493 err = vnic_wq_disable(&enic->wq[i]);
@@ -1436,11 +1505,6 @@ static int enic_stop(struct net_device *netdev)
1436 spin_unlock(&enic->devcmd_lock); 1505 spin_unlock(&enic->devcmd_lock);
1437 enic_free_intr(enic); 1506 enic_free_intr(enic);
1438 1507
1439 (void)vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
1440 -1, enic_rq_service_drop, NULL);
1441 (void)vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
1442 -1, enic_wq_service, NULL);
1443
1444 for (i = 0; i < enic->wq_count; i++) 1508 for (i = 0; i < enic->wq_count; i++)
1445 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); 1509 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
1446 for (i = 0; i < enic->rq_count; i++) 1510 for (i = 0; i < enic->rq_count; i++)
@@ -1762,7 +1826,8 @@ int enic_dev_init(struct enic *enic)
1762 err = enic_set_intr_mode(enic); 1826 err = enic_set_intr_mode(enic);
1763 if (err) { 1827 if (err) {
1764 printk(KERN_ERR PFX 1828 printk(KERN_ERR PFX
1765 "Failed to set intr mode, aborting.\n"); 1829 "Failed to set intr mode based on resource "
1830 "counts and system capabilities, aborting.\n");
1766 return err; 1831 return err;
1767 } 1832 }
1768 1833
@@ -1986,6 +2051,9 @@ static int __devinit enic_probe(struct pci_dev *pdev,
1986 goto err_out_dev_deinit; 2051 goto err_out_dev_deinit;
1987 } 2052 }
1988 2053
2054 enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
2055 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
2056
1989 netdev->netdev_ops = &enic_netdev_ops; 2057 netdev->netdev_ops = &enic_netdev_ops;
1990 netdev->watchdog_timeo = 2 * HZ; 2058 netdev->watchdog_timeo = 2 * HZ;
1991 netdev->ethtool_ops = &enic_ethtool_ops; 2059 netdev->ethtool_ops = &enic_ethtool_ops;
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 32111144efc9..02839bf0fe8b 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -66,21 +66,21 @@ int enic_get_vnic_config(struct enic *enic)
66 GET_CONFIG(wq_desc_count); 66 GET_CONFIG(wq_desc_count);
67 GET_CONFIG(rq_desc_count); 67 GET_CONFIG(rq_desc_count);
68 GET_CONFIG(mtu); 68 GET_CONFIG(mtu);
69 GET_CONFIG(intr_timer);
70 GET_CONFIG(intr_timer_type); 69 GET_CONFIG(intr_timer_type);
71 GET_CONFIG(intr_mode); 70 GET_CONFIG(intr_mode);
71 GET_CONFIG(intr_timer_usec);
72 72
73 c->wq_desc_count = 73 c->wq_desc_count =
74 min_t(u32, ENIC_MAX_WQ_DESCS, 74 min_t(u32, ENIC_MAX_WQ_DESCS,
75 max_t(u32, ENIC_MIN_WQ_DESCS, 75 max_t(u32, ENIC_MIN_WQ_DESCS,
76 c->wq_desc_count)); 76 c->wq_desc_count));
77 c->wq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */ 77 c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
78 78
79 c->rq_desc_count = 79 c->rq_desc_count =
80 min_t(u32, ENIC_MAX_RQ_DESCS, 80 min_t(u32, ENIC_MAX_RQ_DESCS,
81 max_t(u32, ENIC_MIN_RQ_DESCS, 81 max_t(u32, ENIC_MIN_RQ_DESCS,
82 c->rq_desc_count)); 82 c->rq_desc_count));
83 c->rq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */ 83 c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
84 84
85 if (c->mtu == 0) 85 if (c->mtu == 0)
86 c->mtu = 1500; 86 c->mtu = 1500;
@@ -88,15 +88,17 @@ int enic_get_vnic_config(struct enic *enic)
88 max_t(u16, ENIC_MIN_MTU, 88 max_t(u16, ENIC_MIN_MTU,
89 c->mtu)); 89 c->mtu));
90 90
91 c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer); 91 c->intr_timer_usec = min_t(u32,
92 INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
93 c->intr_timer_usec);
92 94
93 printk(KERN_INFO PFX "vNIC MAC addr %pM wq/rq %d/%d\n", 95 printk(KERN_INFO PFX "vNIC MAC addr %pM wq/rq %d/%d\n",
94 enic->mac_addr, c->wq_desc_count, c->rq_desc_count); 96 enic->mac_addr, c->wq_desc_count, c->rq_desc_count);
95 printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d " 97 printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d "
96 "intr timer %d\n", 98 "intr timer %d usec\n",
97 c->mtu, ENIC_SETTING(enic, TXCSUM), 99 c->mtu, ENIC_SETTING(enic, TXCSUM),
98 ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO), 100 ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO),
99 ENIC_SETTING(enic, LRO), c->intr_timer); 101 ENIC_SETTING(enic, LRO), c->intr_timer_usec);
100 102
101 return 0; 103 return 0;
102} 104}
@@ -303,7 +305,7 @@ void enic_init_vnic_resources(struct enic *enic)
303 305
304 for (i = 0; i < enic->intr_count; i++) { 306 for (i = 0; i < enic->intr_count; i++) {
305 vnic_intr_init(&enic->intr[i], 307 vnic_intr_init(&enic->intr[i],
306 enic->config.intr_timer, 308 INTR_COALESCE_USEC_TO_HW(enic->config.intr_timer_usec),
307 enic->config.intr_timer_type, 309 enic->config.intr_timer_type,
308 mask_on_assertion); 310 mask_on_assertion);
309 } 311 }
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index 29a48e8b59d3..69b9b70c7da0 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -36,7 +36,6 @@ struct vnic_res {
36}; 36};
37 37
38#define VNIC_DEV_CAP_INIT 0x0001 38#define VNIC_DEV_CAP_INIT 0x0001
39#define VNIC_DEV_CAP_PERBI 0x0002
40 39
41struct vnic_dev { 40struct vnic_dev {
42 void *priv; 41 void *priv;
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h
index 6332ac9391b8..8eeb6758491b 100644
--- a/drivers/net/enic/vnic_enet.h
+++ b/drivers/net/enic/vnic_enet.h
@@ -20,6 +20,10 @@
20#ifndef _VNIC_ENIC_H_ 20#ifndef _VNIC_ENIC_H_
21#define _VNIC_ENIC_H_ 21#define _VNIC_ENIC_H_
22 22
23/* Hardware intr coalesce timer is in units of 1.5us */
24#define INTR_COALESCE_USEC_TO_HW(usec) ((usec) * 2/3)
25#define INTR_COALESCE_HW_TO_USEC(usec) ((usec) * 3/2)
26
23/* Device-specific region: enet configuration */ 27/* Device-specific region: enet configuration */
24struct vnic_enet_config { 28struct vnic_enet_config {
25 u32 flags; 29 u32 flags;
@@ -30,6 +34,7 @@ struct vnic_enet_config {
30 u8 intr_timer_type; 34 u8 intr_timer_type;
31 u8 intr_mode; 35 u8 intr_mode;
32 char devname[16]; 36 char devname[16];
37 u32 intr_timer_usec;
33}; 38};
34 39
35#define VENETF_TSO 0x1 /* TSO enabled */ 40#define VENETF_TSO 0x1 /* TSO enabled */
diff --git a/drivers/net/enic/vnic_intr.c b/drivers/net/enic/vnic_intr.c
index 1f8786d7195e..3934309a9498 100644
--- a/drivers/net/enic/vnic_intr.c
+++ b/drivers/net/enic/vnic_intr.c
@@ -50,12 +50,18 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
50void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer, 50void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
51 unsigned int coalescing_type, unsigned int mask_on_assertion) 51 unsigned int coalescing_type, unsigned int mask_on_assertion)
52{ 52{
53 iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer); 53 vnic_intr_coalescing_timer_set(intr, coalescing_timer);
54 iowrite32(coalescing_type, &intr->ctrl->coalescing_type); 54 iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
55 iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion); 55 iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
56 iowrite32(0, &intr->ctrl->int_credits); 56 iowrite32(0, &intr->ctrl->int_credits);
57} 57}
58 58
59void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
60 unsigned int coalescing_timer)
61{
62 iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
63}
64
59void vnic_intr_clean(struct vnic_intr *intr) 65void vnic_intr_clean(struct vnic_intr *intr)
60{ 66{
61 iowrite32(0, &intr->ctrl->int_credits); 67 iowrite32(0, &intr->ctrl->int_credits);
diff --git a/drivers/net/enic/vnic_intr.h b/drivers/net/enic/vnic_intr.h
index 9a53604edce6..2fe6c6339e3c 100644
--- a/drivers/net/enic/vnic_intr.h
+++ b/drivers/net/enic/vnic_intr.h
@@ -61,6 +61,7 @@ static inline void vnic_intr_unmask(struct vnic_intr *intr)
61static inline void vnic_intr_mask(struct vnic_intr *intr) 61static inline void vnic_intr_mask(struct vnic_intr *intr)
62{ 62{
63 iowrite32(1, &intr->ctrl->mask); 63 iowrite32(1, &intr->ctrl->mask);
64 (void)ioread32(&intr->ctrl->mask);
64} 65}
65 66
66static inline void vnic_intr_return_credits(struct vnic_intr *intr, 67static inline void vnic_intr_return_credits(struct vnic_intr *intr,
@@ -101,6 +102,8 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
101 unsigned int index); 102 unsigned int index);
102void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer, 103void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
103 unsigned int coalescing_type, unsigned int mask_on_assertion); 104 unsigned int coalescing_type, unsigned int mask_on_assertion);
105void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
106 unsigned int coalescing_timer);
104void vnic_intr_clean(struct vnic_intr *intr); 107void vnic_intr_clean(struct vnic_intr *intr);
105 108
106#endif /* _VNIC_INTR_H_ */ 109#endif /* _VNIC_INTR_H_ */
diff --git a/drivers/net/enic/vnic_nic.h b/drivers/net/enic/vnic_nic.h
index eeaf329945d8..cf80ab46d582 100644
--- a/drivers/net/enic/vnic_nic.h
+++ b/drivers/net/enic/vnic_nic.h
@@ -41,12 +41,12 @@
41#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL 41#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
42#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24 42#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
43 43
44#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 0) 44#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 1)
45#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 1) 45#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2)
46#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 2) 46#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3)
47#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 3) 47#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4)
48#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 4) 48#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 5)
49#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 5) 49#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 6)
50 50
51static inline void vnic_set_nic_cfg(u32 *nic_cfg, 51static inline void vnic_set_nic_cfg(u32 *nic_cfg,
52 u8 rss_default_cpu, u8 rss_hash_type, 52 u8 rss_default_cpu, u8 rss_hash_type,
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 41494f7b2ec8..39c271b6be44 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -167,7 +167,7 @@ static const struct epic_chip_info pci_id_tbl[] = {
167}; 167};
168 168
169 169
170static struct pci_device_id epic_pci_tbl[] = { 170static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = {
171 { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 }, 171 { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
172 { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 }, 172 { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
173 { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID, 173 { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
@@ -1390,21 +1390,20 @@ static void set_rx_mode(struct net_device *dev)
1390 outl(0x002C, ioaddr + RxCtrl); 1390 outl(0x002C, ioaddr + RxCtrl);
1391 /* Unconditionally log net taps. */ 1391 /* Unconditionally log net taps. */
1392 memset(mc_filter, 0xff, sizeof(mc_filter)); 1392 memset(mc_filter, 0xff, sizeof(mc_filter));
1393 } else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) { 1393 } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
1394 /* There is apparently a chip bug, so the multicast filter 1394 /* There is apparently a chip bug, so the multicast filter
1395 is never enabled. */ 1395 is never enabled. */
1396 /* Too many to filter perfectly -- accept all multicasts. */ 1396 /* Too many to filter perfectly -- accept all multicasts. */
1397 memset(mc_filter, 0xff, sizeof(mc_filter)); 1397 memset(mc_filter, 0xff, sizeof(mc_filter));
1398 outl(0x000C, ioaddr + RxCtrl); 1398 outl(0x000C, ioaddr + RxCtrl);
1399 } else if (dev->mc_count == 0) { 1399 } else if (netdev_mc_empty(dev)) {
1400 outl(0x0004, ioaddr + RxCtrl); 1400 outl(0x0004, ioaddr + RxCtrl);
1401 return; 1401 return;
1402 } else { /* Never executed, for now. */ 1402 } else { /* Never executed, for now. */
1403 struct dev_mc_list *mclist; 1403 struct dev_mc_list *mclist;
1404 1404
1405 memset(mc_filter, 0, sizeof(mc_filter)); 1405 memset(mc_filter, 0, sizeof(mc_filter));
1406 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 1406 netdev_for_each_mc_addr(mclist, dev) {
1407 i++, mclist = mclist->next) {
1408 unsigned int bit_nr = 1407 unsigned int bit_nr =
1409 ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f; 1408 ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
1410 mc_filter[bit_nr >> 3] |= (1 << bit_nr); 1409 mc_filter[bit_nr >> 3] |= (1 << bit_nr);
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index 71bfeec33a0b..d3abeee3f110 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1359,7 +1359,7 @@ static void eth16i_multicast(struct net_device *dev)
1359{ 1359{
1360 int ioaddr = dev->base_addr; 1360 int ioaddr = dev->base_addr;
1361 1361
1362 if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC)) 1362 if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
1363 { 1363 {
1364 outb(3, ioaddr + RECEIVE_MODE_REG); 1364 outb(3, ioaddr + RECEIVE_MODE_REG);
1365 } else { 1365 } else {
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index bd1db92aec1b..209742304e20 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -755,7 +755,7 @@ static void ethoc_set_multicast_list(struct net_device *dev)
755{ 755{
756 struct ethoc *priv = netdev_priv(dev); 756 struct ethoc *priv = netdev_priv(dev);
757 u32 mode = ethoc_read(priv, MODER); 757 u32 mode = ethoc_read(priv, MODER);
758 struct dev_mc_list *mc = NULL; 758 struct dev_mc_list *mc;
759 u32 hash[2] = { 0, 0 }; 759 u32 hash[2] = { 0, 0 };
760 760
761 /* set loopback mode if requested */ 761 /* set loopback mode if requested */
@@ -783,8 +783,8 @@ static void ethoc_set_multicast_list(struct net_device *dev)
783 hash[0] = 0xffffffff; 783 hash[0] = 0xffffffff;
784 hash[1] = 0xffffffff; 784 hash[1] = 0xffffffff;
785 } else { 785 } else {
786 for (mc = dev->mc_list; mc; mc = mc->next) { 786 netdev_for_each_mc_addr(mc, dev) {
787 u32 crc = ether_crc(mc->dmi_addrlen, mc->dmi_addr); 787 u32 crc = ether_crc(ETH_ALEN, mc->dmi_addr);
788 int bit = (crc >> 26) & 0x3f; 788 int bit = (crc >> 26) & 0x3f;
789 hash[bit >> 5] |= 1 << (bit & 0x1f); 789 hash[bit >> 5] |= 1 << (bit & 0x1f);
790 } 790 }
@@ -904,7 +904,7 @@ static int ethoc_probe(struct platform_device *pdev)
904 } 904 }
905 905
906 mmio = devm_request_mem_region(&pdev->dev, res->start, 906 mmio = devm_request_mem_region(&pdev->dev, res->start,
907 res->end - res->start + 1, res->name); 907 resource_size(res), res->name);
908 if (!mmio) { 908 if (!mmio) {
909 dev_err(&pdev->dev, "cannot request I/O memory space\n"); 909 dev_err(&pdev->dev, "cannot request I/O memory space\n");
910 ret = -ENXIO; 910 ret = -ENXIO;
@@ -917,7 +917,7 @@ static int ethoc_probe(struct platform_device *pdev)
917 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 917 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
918 if (res) { 918 if (res) {
919 mem = devm_request_mem_region(&pdev->dev, res->start, 919 mem = devm_request_mem_region(&pdev->dev, res->start,
920 res->end - res->start + 1, res->name); 920 resource_size(res), res->name);
921 if (!mem) { 921 if (!mem) {
922 dev_err(&pdev->dev, "cannot request memory space\n"); 922 dev_err(&pdev->dev, "cannot request memory space\n");
923 ret = -ENXIO; 923 ret = -ENXIO;
@@ -945,7 +945,7 @@ static int ethoc_probe(struct platform_device *pdev)
945 priv->dma_alloc = 0; 945 priv->dma_alloc = 0;
946 946
947 priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, 947 priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
948 mmio->end - mmio->start + 1); 948 resource_size(mmio));
949 if (!priv->iobase) { 949 if (!priv->iobase) {
950 dev_err(&pdev->dev, "cannot remap I/O memory space\n"); 950 dev_err(&pdev->dev, "cannot remap I/O memory space\n");
951 ret = -ENXIO; 951 ret = -ENXIO;
@@ -954,7 +954,7 @@ static int ethoc_probe(struct platform_device *pdev)
954 954
955 if (netdev->mem_end) { 955 if (netdev->mem_end) {
956 priv->membase = devm_ioremap_nocache(&pdev->dev, 956 priv->membase = devm_ioremap_nocache(&pdev->dev,
957 netdev->mem_start, mem->end - mem->start + 1); 957 netdev->mem_start, resource_size(mem));
958 if (!priv->membase) { 958 if (!priv->membase) {
959 dev_err(&pdev->dev, "cannot remap memory space\n"); 959 dev_err(&pdev->dev, "cannot remap memory space\n");
960 ret = -ENXIO; 960 ret = -ENXIO;
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index dd4ba01fd92d..91e59f3a9d6d 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -1169,7 +1169,7 @@ static void set_multicast_list(struct net_device *dev)
1169static void SetMulticastFilter(struct net_device *dev) 1169static void SetMulticastFilter(struct net_device *dev)
1170{ 1170{
1171 struct ewrk3_private *lp = netdev_priv(dev); 1171 struct ewrk3_private *lp = netdev_priv(dev);
1172 struct dev_mc_list *dmi = dev->mc_list; 1172 struct dev_mc_list *dmi;
1173 u_long iobase = dev->base_addr; 1173 u_long iobase = dev->base_addr;
1174 int i; 1174 int i;
1175 char *addrs, bit, byte; 1175 char *addrs, bit, byte;
@@ -1213,9 +1213,8 @@ static void SetMulticastFilter(struct net_device *dev)
1213 } 1213 }
1214 1214
1215 /* Update table */ 1215 /* Update table */
1216 for (i = 0; i < dev->mc_count; i++) { /* for each address in the list */ 1216 netdev_for_each_mc_addr(dmi, dev) {
1217 addrs = dmi->dmi_addr; 1217 addrs = dmi->dmi_addr;
1218 dmi = dmi->next;
1219 if ((*addrs & 0x01) == 1) { /* multicast address? */ 1218 if ((*addrs & 0x01) == 1) { /* multicast address? */
1220 crc = ether_crc_le(ETH_ALEN, addrs); 1219 crc = ether_crc_le(ETH_ALEN, addrs);
1221 hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */ 1220 hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index dac4e595589e..9d5ad08a119f 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1786,18 +1786,16 @@ static void __set_rx_mode(struct net_device *dev)
1786 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1786 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1787 memset(mc_filter, 0xff, sizeof(mc_filter)); 1787 memset(mc_filter, 0xff, sizeof(mc_filter));
1788 rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM; 1788 rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
1789 } else if ((dev->mc_count > multicast_filter_limit) || 1789 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1790 (dev->flags & IFF_ALLMULTI)) { 1790 (dev->flags & IFF_ALLMULTI)) {
1791 /* Too many to match, or accept all multicasts. */ 1791 /* Too many to match, or accept all multicasts. */
1792 memset(mc_filter, 0xff, sizeof(mc_filter)); 1792 memset(mc_filter, 0xff, sizeof(mc_filter));
1793 rx_mode = CR_W_AB | CR_W_AM; 1793 rx_mode = CR_W_AB | CR_W_AM;
1794 } else { 1794 } else {
1795 struct dev_mc_list *mclist; 1795 struct dev_mc_list *mclist;
1796 int i;
1797 1796
1798 memset(mc_filter, 0, sizeof(mc_filter)); 1797 memset(mc_filter, 0, sizeof(mc_filter));
1799 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 1798 netdev_for_each_mc_addr(mclist, dev) {
1800 i++, mclist = mclist->next) {
1801 unsigned int bit; 1799 unsigned int bit;
1802 bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F; 1800 bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
1803 mc_filter[bit >> 5] |= (1 << bit); 1801 mc_filter[bit >> 5] |= (1 << bit);
@@ -1941,7 +1939,7 @@ static int netdev_close(struct net_device *dev)
1941 return 0; 1939 return 0;
1942} 1940}
1943 1941
1944static struct pci_device_id fealnx_pci_tbl[] = { 1942static DEFINE_PCI_DEVICE_TABLE(fealnx_pci_tbl) = {
1945 {0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 1943 {0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1946 {0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1}, 1944 {0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
1947 {0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2}, 1945 {0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 16a1d58419d9..9f98c1c4a344 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1128,6 +1128,26 @@ static phy_info_t phy_info_dp83848= {
1128 }, 1128 },
1129}; 1129};
1130 1130
1131static phy_info_t phy_info_lan8700 = {
1132 0x0007C0C,
1133 "LAN8700",
1134 (const phy_cmd_t []) { /* config */
1135 { mk_mii_read(MII_REG_CR), mii_parse_cr },
1136 { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
1137 { mk_mii_end, }
1138 },
1139 (const phy_cmd_t []) { /* startup */
1140 { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
1141 { mk_mii_read(MII_REG_SR), mii_parse_sr },
1142 { mk_mii_end, }
1143 },
1144 (const phy_cmd_t []) { /* act_int */
1145 { mk_mii_end, }
1146 },
1147 (const phy_cmd_t []) { /* shutdown */
1148 { mk_mii_end, }
1149 },
1150};
1131/* ------------------------------------------------------------------------- */ 1151/* ------------------------------------------------------------------------- */
1132 1152
1133static phy_info_t const * const phy_info[] = { 1153static phy_info_t const * const phy_info[] = {
@@ -1137,6 +1157,7 @@ static phy_info_t const * const phy_info[] = {
1137 &phy_info_am79c874, 1157 &phy_info_am79c874,
1138 &phy_info_ks8721bl, 1158 &phy_info_ks8721bl,
1139 &phy_info_dp83848, 1159 &phy_info_dp83848,
1160 &phy_info_lan8700,
1140 NULL 1161 NULL
1141}; 1162};
1142 1163
@@ -1554,7 +1575,7 @@ static void set_multicast_list(struct net_device *dev)
1554{ 1575{
1555 struct fec_enet_private *fep = netdev_priv(dev); 1576 struct fec_enet_private *fep = netdev_priv(dev);
1556 struct dev_mc_list *dmi; 1577 struct dev_mc_list *dmi;
1557 unsigned int i, j, bit, data, crc, tmp; 1578 unsigned int i, bit, data, crc, tmp;
1558 unsigned char hash; 1579 unsigned char hash;
1559 1580
1560 if (dev->flags & IFF_PROMISC) { 1581 if (dev->flags & IFF_PROMISC) {
@@ -1583,9 +1604,7 @@ static void set_multicast_list(struct net_device *dev)
1583 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 1604 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1584 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 1605 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1585 1606
1586 dmi = dev->mc_list; 1607 netdev_for_each_mc_addr(dmi, dev) {
1587
1588 for (j = 0; j < dev->mc_count; j++, dmi = dmi->next) {
1589 /* Only support group multicast for now */ 1608 /* Only support group multicast for now */
1590 if (!(dmi->dmi_addr[0] & 1)) 1609 if (!(dmi->dmi_addr[0] & 1))
1591 continue; 1610 continue;
@@ -1658,6 +1677,7 @@ static int fec_enet_init(struct net_device *dev, int index)
1658{ 1677{
1659 struct fec_enet_private *fep = netdev_priv(dev); 1678 struct fec_enet_private *fep = netdev_priv(dev);
1660 struct bufdesc *cbd_base; 1679 struct bufdesc *cbd_base;
1680 struct bufdesc *bdp;
1661 int i; 1681 int i;
1662 1682
1663 /* Allocate memory for buffer descriptors. */ 1683 /* Allocate memory for buffer descriptors. */
@@ -1710,6 +1730,34 @@ static int fec_enet_init(struct net_device *dev, int index)
1710 /* Set MII speed to 2.5 MHz */ 1730 /* Set MII speed to 2.5 MHz */
1711 fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999) 1731 fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
1712 / 2500000) / 2) & 0x3F) << 1; 1732 / 2500000) / 2) & 0x3F) << 1;
1733
1734 /* Initialize the receive buffer descriptors. */
1735 bdp = fep->rx_bd_base;
1736 for (i = 0; i < RX_RING_SIZE; i++) {
1737
1738 /* Initialize the BD for every fragment in the page. */
1739 bdp->cbd_sc = 0;
1740 bdp++;
1741 }
1742
1743 /* Set the last buffer to wrap */
1744 bdp--;
1745 bdp->cbd_sc |= BD_SC_WRAP;
1746
1747 /* ...and the same for transmit */
1748 bdp = fep->tx_bd_base;
1749 for (i = 0; i < TX_RING_SIZE; i++) {
1750
1751 /* Initialize the BD for every fragment in the page. */
1752 bdp->cbd_sc = 0;
1753 bdp->cbd_bufaddr = 0;
1754 bdp++;
1755 }
1756
1757 /* Set the last buffer to wrap */
1758 bdp--;
1759 bdp->cbd_sc |= BD_SC_WRAP;
1760
1713 fec_restart(dev, 0); 1761 fec_restart(dev, 0);
1714 1762
1715 /* Queue up command to detect the PHY and initialize the 1763 /* Queue up command to detect the PHY and initialize the
@@ -1730,7 +1778,6 @@ static void
1730fec_restart(struct net_device *dev, int duplex) 1778fec_restart(struct net_device *dev, int duplex)
1731{ 1779{
1732 struct fec_enet_private *fep = netdev_priv(dev); 1780 struct fec_enet_private *fep = netdev_priv(dev);
1733 struct bufdesc *bdp;
1734 int i; 1781 int i;
1735 1782
1736 /* Whack a reset. We should wait for this. */ 1783 /* Whack a reset. We should wait for this. */
@@ -1768,33 +1815,6 @@ fec_restart(struct net_device *dev, int duplex)
1768 } 1815 }
1769 } 1816 }
1770 1817
1771 /* Initialize the receive buffer descriptors. */
1772 bdp = fep->rx_bd_base;
1773 for (i = 0; i < RX_RING_SIZE; i++) {
1774
1775 /* Initialize the BD for every fragment in the page. */
1776 bdp->cbd_sc = BD_ENET_RX_EMPTY;
1777 bdp++;
1778 }
1779
1780 /* Set the last buffer to wrap */
1781 bdp--;
1782 bdp->cbd_sc |= BD_SC_WRAP;
1783
1784 /* ...and the same for transmit */
1785 bdp = fep->tx_bd_base;
1786 for (i = 0; i < TX_RING_SIZE; i++) {
1787
1788 /* Initialize the BD for every fragment in the page. */
1789 bdp->cbd_sc = 0;
1790 bdp->cbd_bufaddr = 0;
1791 bdp++;
1792 }
1793
1794 /* Set the last buffer to wrap */
1795 bdp--;
1796 bdp->cbd_sc |= BD_SC_WRAP;
1797
1798 /* Enable MII mode */ 1818 /* Enable MII mode */
1799 if (duplex) { 1819 if (duplex) {
1800 /* MII enable / FD enable */ 1820 /* MII enable / FD enable */
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 848e8407ea8f..0dbd7219bbde 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -575,19 +575,16 @@ static void mpc52xx_fec_set_multicast_list(struct net_device *dev)
575 out_be32(&fec->gaddr2, 0xffffffff); 575 out_be32(&fec->gaddr2, 0xffffffff);
576 } else { 576 } else {
577 u32 crc; 577 u32 crc;
578 int i;
579 struct dev_mc_list *dmi; 578 struct dev_mc_list *dmi;
580 u32 gaddr1 = 0x00000000; 579 u32 gaddr1 = 0x00000000;
581 u32 gaddr2 = 0x00000000; 580 u32 gaddr2 = 0x00000000;
582 581
583 dmi = dev->mc_list; 582 netdev_for_each_mc_addr(dmi, dev) {
584 for (i=0; i<dev->mc_count; i++) {
585 crc = ether_crc_le(6, dmi->dmi_addr) >> 26; 583 crc = ether_crc_le(6, dmi->dmi_addr) >> 26;
586 if (crc >= 32) 584 if (crc >= 32)
587 gaddr1 |= 1 << (crc-32); 585 gaddr1 |= 1 << (crc-32);
588 else 586 else
589 gaddr2 |= 1 << crc; 587 gaddr2 |= 1 << crc;
590 dmi = dmi->next;
591 } 588 }
592 out_be32(&fec->gaddr1, gaddr1); 589 out_be32(&fec->gaddr1, gaddr1);
593 out_be32(&fec->gaddr2, gaddr2); 590 out_be32(&fec->gaddr2, gaddr2);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 3c340489804a..ca05e5662029 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -3095,7 +3095,7 @@ static void nv_set_multicast(struct net_device *dev)
3095 } else { 3095 } else {
3096 pff |= NVREG_PFF_MYADDR; 3096 pff |= NVREG_PFF_MYADDR;
3097 3097
3098 if (dev->flags & IFF_ALLMULTI || dev->mc_list) { 3098 if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
3099 u32 alwaysOff[2]; 3099 u32 alwaysOff[2];
3100 u32 alwaysOn[2]; 3100 u32 alwaysOn[2];
3101 3101
@@ -3105,8 +3105,7 @@ static void nv_set_multicast(struct net_device *dev)
3105 } else { 3105 } else {
3106 struct dev_mc_list *walk; 3106 struct dev_mc_list *walk;
3107 3107
3108 walk = dev->mc_list; 3108 netdev_for_each_mc_addr(walk, dev) {
3109 while (walk != NULL) {
3110 u32 a, b; 3109 u32 a, b;
3111 a = le32_to_cpu(*(__le32 *) walk->dmi_addr); 3110 a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
3112 b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4])); 3111 b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
@@ -3114,7 +3113,6 @@ static void nv_set_multicast(struct net_device *dev)
3114 alwaysOff[0] &= ~a; 3113 alwaysOff[0] &= ~a;
3115 alwaysOn[1] &= b; 3114 alwaysOn[1] &= b;
3116 alwaysOff[1] &= ~b; 3115 alwaysOff[1] &= ~b;
3117 walk = walk->next;
3118 } 3116 }
3119 } 3117 }
3120 addr[0] = alwaysOn[0]; 3118 addr[0] = alwaysOn[0];
@@ -6198,7 +6196,7 @@ static void nv_shutdown(struct pci_dev *pdev)
6198#define nv_resume NULL 6196#define nv_resume NULL
6199#endif /* CONFIG_PM */ 6197#endif /* CONFIG_PM */
6200 6198
6201static struct pci_device_id pci_tbl[] = { 6199static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
6202 { /* nForce Ethernet Controller */ 6200 { /* nForce Ethernet Controller */
6203 PCI_DEVICE(0x10DE, 0x01C3), 6201 PCI_DEVICE(0x10DE, 0x01C3),
6204 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 6202 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
diff --git a/drivers/net/fs_enet/Kconfig b/drivers/net/fs_enet/Kconfig
index 562ea68ed99b..fc073b5a38c7 100644
--- a/drivers/net/fs_enet/Kconfig
+++ b/drivers/net/fs_enet/Kconfig
@@ -1,9 +1,13 @@
1config FS_ENET 1config FS_ENET
2 tristate "Freescale Ethernet Driver" 2 tristate "Freescale Ethernet Driver"
3 depends on CPM1 || CPM2 3 depends on CPM1 || CPM2 || PPC_MPC512x
4 select MII 4 select MII
5 select PHYLIB 5 select PHYLIB
6 6
7config FS_ENET_MPC5121_FEC
8 def_bool y if (FS_ENET && PPC_MPC512x)
9 select FS_ENET_HAS_FEC
10
7config FS_ENET_HAS_SCC 11config FS_ENET_HAS_SCC
8 bool "Chip has an SCC usable for ethernet" 12 bool "Chip has an SCC usable for ethernet"
9 depends on FS_ENET && (CPM1 || CPM2) 13 depends on FS_ENET && (CPM1 || CPM2)
@@ -16,13 +20,13 @@ config FS_ENET_HAS_FCC
16 20
17config FS_ENET_HAS_FEC 21config FS_ENET_HAS_FEC
18 bool "Chip has an FEC usable for ethernet" 22 bool "Chip has an FEC usable for ethernet"
19 depends on FS_ENET && CPM1 23 depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC)
20 select FS_ENET_MDIO_FEC 24 select FS_ENET_MDIO_FEC
21 default y 25 default y
22 26
23config FS_ENET_MDIO_FEC 27config FS_ENET_MDIO_FEC
24 tristate "MDIO driver for FEC" 28 tristate "MDIO driver for FEC"
25 depends on FS_ENET && CPM1 29 depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC)
26 30
27config FS_ENET_MDIO_FCC 31config FS_ENET_MDIO_FCC
28 tristate "MDIO driver for FCC" 32 tristate "MDIO driver for FCC"
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index ec2f5034457f..0770e2f6da6b 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -108,9 +108,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
108 * the last indicator should be set. 108 * the last indicator should be set.
109 */ 109 */
110 if ((sc & BD_ENET_RX_LAST) == 0) 110 if ((sc & BD_ENET_RX_LAST) == 0)
111 printk(KERN_WARNING DRV_MODULE_NAME 111 dev_warn(fep->dev, "rcv is not +last\n");
112 ": %s rcv is not +last\n",
113 dev->name);
114 112
115 /* 113 /*
116 * Check for errors. 114 * Check for errors.
@@ -178,9 +176,8 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
178 received++; 176 received++;
179 netif_receive_skb(skb); 177 netif_receive_skb(skb);
180 } else { 178 } else {
181 printk(KERN_WARNING DRV_MODULE_NAME 179 dev_warn(fep->dev,
182 ": %s Memory squeeze, dropping packet.\n", 180 "Memory squeeze, dropping packet.\n");
183 dev->name);
184 fep->stats.rx_dropped++; 181 fep->stats.rx_dropped++;
185 skbn = skb; 182 skbn = skb;
186 } 183 }
@@ -242,9 +239,7 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
242 * the last indicator should be set. 239 * the last indicator should be set.
243 */ 240 */
244 if ((sc & BD_ENET_RX_LAST) == 0) 241 if ((sc & BD_ENET_RX_LAST) == 0)
245 printk(KERN_WARNING DRV_MODULE_NAME 242 dev_warn(fep->dev, "rcv is not +last\n");
246 ": %s rcv is not +last\n",
247 dev->name);
248 243
249 /* 244 /*
250 * Check for errors. 245 * Check for errors.
@@ -313,9 +308,8 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
313 received++; 308 received++;
314 netif_rx(skb); 309 netif_rx(skb);
315 } else { 310 } else {
316 printk(KERN_WARNING DRV_MODULE_NAME 311 dev_warn(fep->dev,
317 ": %s Memory squeeze, dropping packet.\n", 312 "Memory squeeze, dropping packet.\n");
318 dev->name);
319 fep->stats.rx_dropped++; 313 fep->stats.rx_dropped++;
320 skbn = skb; 314 skbn = skb;
321 } 315 }
@@ -388,10 +382,10 @@ static void fs_enet_tx(struct net_device *dev)
388 } else 382 } else
389 fep->stats.tx_packets++; 383 fep->stats.tx_packets++;
390 384
391 if (sc & BD_ENET_TX_READY) 385 if (sc & BD_ENET_TX_READY) {
392 printk(KERN_WARNING DRV_MODULE_NAME 386 dev_warn(fep->dev,
393 ": %s HEY! Enet xmit interrupt and TX_READY.\n", 387 "HEY! Enet xmit interrupt and TX_READY.\n");
394 dev->name); 388 }
395 389
396 /* 390 /*
397 * Deferred means some collisions occurred during transmit, 391 * Deferred means some collisions occurred during transmit,
@@ -511,9 +505,8 @@ void fs_init_bds(struct net_device *dev)
511 for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { 505 for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
512 skb = dev_alloc_skb(ENET_RX_FRSIZE); 506 skb = dev_alloc_skb(ENET_RX_FRSIZE);
513 if (skb == NULL) { 507 if (skb == NULL) {
514 printk(KERN_WARNING DRV_MODULE_NAME 508 dev_warn(fep->dev,
515 ": %s Memory squeeze, unable to allocate skb\n", 509 "Memory squeeze, unable to allocate skb\n");
516 dev->name);
517 break; 510 break;
518 } 511 }
519 skb_align(skb, ENET_RX_ALIGN); 512 skb_align(skb, ENET_RX_ALIGN);
@@ -587,6 +580,40 @@ void fs_cleanup_bds(struct net_device *dev)
587 580
588/**********************************************************************************/ 581/**********************************************************************************/
589 582
583#ifdef CONFIG_FS_ENET_MPC5121_FEC
584/*
585 * MPC5121 FEC requeries 4-byte alignment for TX data buffer!
586 */
587static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
588 struct sk_buff *skb)
589{
590 struct sk_buff *new_skb;
591 struct fs_enet_private *fep = netdev_priv(dev);
592
593 /* Alloc new skb */
594 new_skb = dev_alloc_skb(skb->len + 4);
595 if (!new_skb) {
596 if (net_ratelimit()) {
597 dev_warn(fep->dev,
598 "Memory squeeze, dropping tx packet.\n");
599 }
600 return NULL;
601 }
602
603 /* Make sure new skb is properly aligned */
604 skb_align(new_skb, 4);
605
606 /* Copy data to new skb ... */
607 skb_copy_from_linear_data(skb, new_skb->data, skb->len);
608 skb_put(new_skb, skb->len);
609
610 /* ... and free an old one */
611 dev_kfree_skb_any(skb);
612
613 return new_skb;
614}
615#endif
616
590static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) 617static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
591{ 618{
592 struct fs_enet_private *fep = netdev_priv(dev); 619 struct fs_enet_private *fep = netdev_priv(dev);
@@ -595,6 +622,19 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
595 u16 sc; 622 u16 sc;
596 unsigned long flags; 623 unsigned long flags;
597 624
625#ifdef CONFIG_FS_ENET_MPC5121_FEC
626 if (((unsigned long)skb->data) & 0x3) {
627 skb = tx_skb_align_workaround(dev, skb);
628 if (!skb) {
629 /*
630 * We have lost packet due to memory allocation error
631 * in tx_skb_align_workaround(). Hopefully original
632 * skb is still valid, so try transmit it later.
633 */
634 return NETDEV_TX_BUSY;
635 }
636 }
637#endif
598 spin_lock_irqsave(&fep->tx_lock, flags); 638 spin_lock_irqsave(&fep->tx_lock, flags);
599 639
600 /* 640 /*
@@ -610,8 +650,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
610 * Ooops. All transmit buffers are full. Bail out. 650 * Ooops. All transmit buffers are full. Bail out.
611 * This should not happen, since the tx queue should be stopped. 651 * This should not happen, since the tx queue should be stopped.
612 */ 652 */
613 printk(KERN_WARNING DRV_MODULE_NAME 653 dev_warn(fep->dev, "tx queue full!.\n");
614 ": %s tx queue full!.\n", dev->name);
615 return NETDEV_TX_BUSY; 654 return NETDEV_TX_BUSY;
616 } 655 }
617 656
@@ -788,8 +827,7 @@ static int fs_enet_open(struct net_device *dev)
788 r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED, 827 r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
789 "fs_enet-mac", dev); 828 "fs_enet-mac", dev);
790 if (r != 0) { 829 if (r != 0) {
791 printk(KERN_ERR DRV_MODULE_NAME 830 dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
792 ": %s Could not allocate FS_ENET IRQ!", dev->name);
793 if (fep->fpi->use_napi) 831 if (fep->fpi->use_napi)
794 napi_disable(&fep->napi); 832 napi_disable(&fep->napi);
795 return -EINVAL; 833 return -EINVAL;
@@ -1053,7 +1091,7 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
1053 if (ret) 1091 if (ret)
1054 goto out_free_bd; 1092 goto out_free_bd;
1055 1093
1056 printk(KERN_INFO "%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr); 1094 pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);
1057 1095
1058 return 0; 1096 return 0;
1059 1097
@@ -1103,11 +1141,18 @@ static struct of_device_id fs_enet_match[] = {
1103 }, 1141 },
1104#endif 1142#endif
1105#ifdef CONFIG_FS_ENET_HAS_FEC 1143#ifdef CONFIG_FS_ENET_HAS_FEC
1144#ifdef CONFIG_FS_ENET_MPC5121_FEC
1145 {
1146 .compatible = "fsl,mpc5121-fec",
1147 .data = (void *)&fs_fec_ops,
1148 },
1149#else
1106 { 1150 {
1107 .compatible = "fsl,pq1-fec-enet", 1151 .compatible = "fsl,pq1-fec-enet",
1108 .data = (void *)&fs_fec_ops, 1152 .data = (void *)&fs_fec_ops,
1109 }, 1153 },
1110#endif 1154#endif
1155#endif
1111 {} 1156 {}
1112}; 1157};
1113MODULE_DEVICE_TABLE(of, fs_enet_match); 1158MODULE_DEVICE_TABLE(of, fs_enet_match);
diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h
index ef01e09781a5..1ece4b1a689e 100644
--- a/drivers/net/fs_enet/fs_enet.h
+++ b/drivers/net/fs_enet/fs_enet.h
@@ -13,9 +13,56 @@
13 13
14#ifdef CONFIG_CPM1 14#ifdef CONFIG_CPM1
15#include <asm/cpm1.h> 15#include <asm/cpm1.h>
16#endif
17
18#if defined(CONFIG_FS_ENET_HAS_FEC)
19#include <asm/cpm.h>
20
21#if defined(CONFIG_FS_ENET_MPC5121_FEC)
22/* MPC5121 FEC has different register layout */
23struct fec {
24 u32 fec_reserved0;
25 u32 fec_ievent; /* Interrupt event reg */
26 u32 fec_imask; /* Interrupt mask reg */
27 u32 fec_reserved1;
28 u32 fec_r_des_active; /* Receive descriptor reg */
29 u32 fec_x_des_active; /* Transmit descriptor reg */
30 u32 fec_reserved2[3];
31 u32 fec_ecntrl; /* Ethernet control reg */
32 u32 fec_reserved3[6];
33 u32 fec_mii_data; /* MII manage frame reg */
34 u32 fec_mii_speed; /* MII speed control reg */
35 u32 fec_reserved4[7];
36 u32 fec_mib_ctrlstat; /* MIB control/status reg */
37 u32 fec_reserved5[7];
38 u32 fec_r_cntrl; /* Receive control reg */
39 u32 fec_reserved6[15];
40 u32 fec_x_cntrl; /* Transmit Control reg */
41 u32 fec_reserved7[7];
42 u32 fec_addr_low; /* Low 32bits MAC address */
43 u32 fec_addr_high; /* High 16bits MAC address */
44 u32 fec_opd; /* Opcode + Pause duration */
45 u32 fec_reserved8[10];
46 u32 fec_hash_table_high; /* High 32bits hash table */
47 u32 fec_hash_table_low; /* Low 32bits hash table */
48 u32 fec_grp_hash_table_high; /* High 32bits hash table */
49 u32 fec_grp_hash_table_low; /* Low 32bits hash table */
50 u32 fec_reserved9[7];
51 u32 fec_x_wmrk; /* FIFO transmit water mark */
52 u32 fec_reserved10;
53 u32 fec_r_bound; /* FIFO receive bound reg */
54 u32 fec_r_fstart; /* FIFO receive start reg */
55 u32 fec_reserved11[11];
56 u32 fec_r_des_start; /* Receive descriptor ring */
57 u32 fec_x_des_start; /* Transmit descriptor ring */
58 u32 fec_r_buff_size; /* Maximum receive buff size */
59 u32 fec_reserved12[26];
60 u32 fec_dma_control; /* DMA Endian and other ctrl */
61};
62#endif
16 63
17struct fec_info { 64struct fec_info {
18 fec_t __iomem *fecp; 65 struct fec __iomem *fecp;
19 u32 mii_speed; 66 u32 mii_speed;
20}; 67};
21#endif 68#endif
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 22e5a847a588..cf4f674f9e2e 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -218,7 +218,7 @@ static void set_multicast_finish(struct net_device *dev)
218 218
219 /* if all multi or too many multicasts; just enable all */ 219 /* if all multi or too many multicasts; just enable all */
220 if ((dev->flags & IFF_ALLMULTI) != 0 || 220 if ((dev->flags & IFF_ALLMULTI) != 0 ||
221 dev->mc_count > FCC_MAX_MULTICAST_ADDRS) { 221 netdev_mc_count(dev) > FCC_MAX_MULTICAST_ADDRS) {
222 222
223 W32(ep, fen_gaddrh, 0xffffffff); 223 W32(ep, fen_gaddrh, 0xffffffff);
224 W32(ep, fen_gaddrl, 0xffffffff); 224 W32(ep, fen_gaddrl, 0xffffffff);
@@ -235,7 +235,7 @@ static void set_multicast_list(struct net_device *dev)
235 235
236 if ((dev->flags & IFF_PROMISC) == 0) { 236 if ((dev->flags & IFF_PROMISC) == 0) {
237 set_multicast_start(dev); 237 set_multicast_start(dev);
238 for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next) 238 netdev_for_each_mc_addr(pmc, dev)
239 set_multicast_one(dev, pmc->dmi_addr); 239 set_multicast_one(dev, pmc->dmi_addr);
240 set_multicast_finish(dev); 240 set_multicast_finish(dev);
241 } else 241 } else
@@ -476,8 +476,9 @@ static void clear_int_events(struct net_device *dev, u32 int_events)
476 476
477static void ev_error(struct net_device *dev, u32 int_events) 477static void ev_error(struct net_device *dev, u32 int_events)
478{ 478{
479 printk(KERN_WARNING DRV_MODULE_NAME 479 struct fs_enet_private *fep = netdev_priv(dev);
480 ": %s FS_ENET ERROR(s) 0x%x\n", dev->name, int_events); 480
481 dev_warn(fep->dev, "FS_ENET ERROR(s) 0x%x\n", int_events);
481} 482}
482 483
483static int get_regs(struct net_device *dev, void *p, int *sizep) 484static int get_regs(struct net_device *dev, void *p, int *sizep)
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index ca7bcb8ab3a1..cd2c6cca5f24 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -80,7 +80,7 @@
80 */ 80 */
81#define FEC_RESET_DELAY 50 81#define FEC_RESET_DELAY 50
82 82
83static int whack_reset(fec_t __iomem *fecp) 83static int whack_reset(struct fec __iomem *fecp)
84{ 84{
85 int i; 85 int i;
86 86
@@ -168,7 +168,7 @@ static void cleanup_data(struct net_device *dev)
168static void set_promiscuous_mode(struct net_device *dev) 168static void set_promiscuous_mode(struct net_device *dev)
169{ 169{
170 struct fs_enet_private *fep = netdev_priv(dev); 170 struct fs_enet_private *fep = netdev_priv(dev);
171 fec_t __iomem *fecp = fep->fec.fecp; 171 struct fec __iomem *fecp = fep->fec.fecp;
172 172
173 FS(fecp, r_cntrl, FEC_RCNTRL_PROM); 173 FS(fecp, r_cntrl, FEC_RCNTRL_PROM);
174} 174}
@@ -216,11 +216,11 @@ static void set_multicast_one(struct net_device *dev, const u8 *mac)
216static void set_multicast_finish(struct net_device *dev) 216static void set_multicast_finish(struct net_device *dev)
217{ 217{
218 struct fs_enet_private *fep = netdev_priv(dev); 218 struct fs_enet_private *fep = netdev_priv(dev);
219 fec_t __iomem *fecp = fep->fec.fecp; 219 struct fec __iomem *fecp = fep->fec.fecp;
220 220
221 /* if all multi or too many multicasts; just enable all */ 221 /* if all multi or too many multicasts; just enable all */
222 if ((dev->flags & IFF_ALLMULTI) != 0 || 222 if ((dev->flags & IFF_ALLMULTI) != 0 ||
223 dev->mc_count > FEC_MAX_MULTICAST_ADDRS) { 223 netdev_mc_count(dev) > FEC_MAX_MULTICAST_ADDRS) {
224 fep->fec.hthi = 0xffffffffU; 224 fep->fec.hthi = 0xffffffffU;
225 fep->fec.htlo = 0xffffffffU; 225 fep->fec.htlo = 0xffffffffU;
226 } 226 }
@@ -236,7 +236,7 @@ static void set_multicast_list(struct net_device *dev)
236 236
237 if ((dev->flags & IFF_PROMISC) == 0) { 237 if ((dev->flags & IFF_PROMISC) == 0) {
238 set_multicast_start(dev); 238 set_multicast_start(dev);
239 for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next) 239 netdev_for_each_mc_addr(pmc, dev)
240 set_multicast_one(dev, pmc->dmi_addr); 240 set_multicast_one(dev, pmc->dmi_addr);
241 set_multicast_finish(dev); 241 set_multicast_finish(dev);
242 } else 242 } else
@@ -246,7 +246,7 @@ static void set_multicast_list(struct net_device *dev)
246static void restart(struct net_device *dev) 246static void restart(struct net_device *dev)
247{ 247{
248 struct fs_enet_private *fep = netdev_priv(dev); 248 struct fs_enet_private *fep = netdev_priv(dev);
249 fec_t __iomem *fecp = fep->fec.fecp; 249 struct fec __iomem *fecp = fep->fec.fecp;
250 const struct fs_platform_info *fpi = fep->fpi; 250 const struct fs_platform_info *fpi = fep->fpi;
251 dma_addr_t rx_bd_base_phys, tx_bd_base_phys; 251 dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
252 int r; 252 int r;
@@ -257,8 +257,7 @@ static void restart(struct net_device *dev)
257 257
258 r = whack_reset(fep->fec.fecp); 258 r = whack_reset(fep->fec.fecp);
259 if (r != 0) 259 if (r != 0)
260 printk(KERN_ERR DRV_MODULE_NAME 260 dev_err(fep->dev, "FEC Reset FAILED!\n");
261 ": %s FEC Reset FAILED!\n", dev->name);
262 /* 261 /*
263 * Set station address. 262 * Set station address.
264 */ 263 */
@@ -281,7 +280,11 @@ static void restart(struct net_device *dev)
281 * Set maximum receive buffer size. 280 * Set maximum receive buffer size.
282 */ 281 */
283 FW(fecp, r_buff_size, PKT_MAXBLR_SIZE); 282 FW(fecp, r_buff_size, PKT_MAXBLR_SIZE);
283#ifdef CONFIG_FS_ENET_MPC5121_FEC
284 FW(fecp, r_cntrl, PKT_MAXBUF_SIZE << 16);
285#else
284 FW(fecp, r_hash, PKT_MAXBUF_SIZE); 286 FW(fecp, r_hash, PKT_MAXBUF_SIZE);
287#endif
285 288
286 /* get physical address */ 289 /* get physical address */
287 rx_bd_base_phys = fep->ring_mem_addr; 290 rx_bd_base_phys = fep->ring_mem_addr;
@@ -298,7 +301,11 @@ static void restart(struct net_device *dev)
298 /* 301 /*
299 * Enable big endian and don't care about SDMA FC. 302 * Enable big endian and don't care about SDMA FC.
300 */ 303 */
304#ifdef CONFIG_FS_ENET_MPC5121_FEC
305 FS(fecp, dma_control, 0xC0000000);
306#else
301 FW(fecp, fun_code, 0x78000000); 307 FW(fecp, fun_code, 0x78000000);
308#endif
302 309
303 /* 310 /*
304 * Set MII speed. 311 * Set MII speed.
@@ -309,9 +316,17 @@ static void restart(struct net_device *dev)
309 * Clear any outstanding interrupt. 316 * Clear any outstanding interrupt.
310 */ 317 */
311 FW(fecp, ievent, 0xffc0); 318 FW(fecp, ievent, 0xffc0);
319#ifndef CONFIG_FS_ENET_MPC5121_FEC
312 FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29); 320 FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29);
313 321
314 FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ 322 FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
323#else
324 /*
325 * Only set MII mode - do not touch maximum frame length
326 * configured before.
327 */
328 FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE);
329#endif
315 /* 330 /*
316 * adjust to duplex mode 331 * adjust to duplex mode
317 */ 332 */
@@ -340,7 +355,7 @@ static void stop(struct net_device *dev)
340{ 355{
341 struct fs_enet_private *fep = netdev_priv(dev); 356 struct fs_enet_private *fep = netdev_priv(dev);
342 const struct fs_platform_info *fpi = fep->fpi; 357 const struct fs_platform_info *fpi = fep->fpi;
343 fec_t __iomem *fecp = fep->fec.fecp; 358 struct fec __iomem *fecp = fep->fec.fecp;
344 359
345 struct fec_info* feci= fep->phydev->bus->priv; 360 struct fec_info* feci= fep->phydev->bus->priv;
346 361
@@ -355,9 +370,7 @@ static void stop(struct net_device *dev)
355 udelay(1); 370 udelay(1);
356 371
357 if (i == FEC_RESET_DELAY) 372 if (i == FEC_RESET_DELAY)
358 printk(KERN_WARNING DRV_MODULE_NAME 373 dev_warn(fep->dev, "FEC timeout on graceful transmit stop\n");
359 ": %s FEC timeout on graceful transmit stop\n",
360 dev->name);
361 /* 374 /*
362 * Disable FEC. Let only MII interrupts. 375 * Disable FEC. Let only MII interrupts.
363 */ 376 */
@@ -378,7 +391,7 @@ static void stop(struct net_device *dev)
378static void napi_clear_rx_event(struct net_device *dev) 391static void napi_clear_rx_event(struct net_device *dev)
379{ 392{
380 struct fs_enet_private *fep = netdev_priv(dev); 393 struct fs_enet_private *fep = netdev_priv(dev);
381 fec_t __iomem *fecp = fep->fec.fecp; 394 struct fec __iomem *fecp = fep->fec.fecp;
382 395
383 FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK); 396 FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK);
384} 397}
@@ -386,7 +399,7 @@ static void napi_clear_rx_event(struct net_device *dev)
386static void napi_enable_rx(struct net_device *dev) 399static void napi_enable_rx(struct net_device *dev)
387{ 400{
388 struct fs_enet_private *fep = netdev_priv(dev); 401 struct fs_enet_private *fep = netdev_priv(dev);
389 fec_t __iomem *fecp = fep->fec.fecp; 402 struct fec __iomem *fecp = fep->fec.fecp;
390 403
391 FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK); 404 FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
392} 405}
@@ -394,7 +407,7 @@ static void napi_enable_rx(struct net_device *dev)
394static void napi_disable_rx(struct net_device *dev) 407static void napi_disable_rx(struct net_device *dev)
395{ 408{
396 struct fs_enet_private *fep = netdev_priv(dev); 409 struct fs_enet_private *fep = netdev_priv(dev);
397 fec_t __iomem *fecp = fep->fec.fecp; 410 struct fec __iomem *fecp = fep->fec.fecp;
398 411
399 FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK); 412 FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
400} 413}
@@ -402,7 +415,7 @@ static void napi_disable_rx(struct net_device *dev)
402static void rx_bd_done(struct net_device *dev) 415static void rx_bd_done(struct net_device *dev)
403{ 416{
404 struct fs_enet_private *fep = netdev_priv(dev); 417 struct fs_enet_private *fep = netdev_priv(dev);
405 fec_t __iomem *fecp = fep->fec.fecp; 418 struct fec __iomem *fecp = fep->fec.fecp;
406 419
407 FW(fecp, r_des_active, 0x01000000); 420 FW(fecp, r_des_active, 0x01000000);
408} 421}
@@ -410,7 +423,7 @@ static void rx_bd_done(struct net_device *dev)
410static void tx_kickstart(struct net_device *dev) 423static void tx_kickstart(struct net_device *dev)
411{ 424{
412 struct fs_enet_private *fep = netdev_priv(dev); 425 struct fs_enet_private *fep = netdev_priv(dev);
413 fec_t __iomem *fecp = fep->fec.fecp; 426 struct fec __iomem *fecp = fep->fec.fecp;
414 427
415 FW(fecp, x_des_active, 0x01000000); 428 FW(fecp, x_des_active, 0x01000000);
416} 429}
@@ -418,7 +431,7 @@ static void tx_kickstart(struct net_device *dev)
418static u32 get_int_events(struct net_device *dev) 431static u32 get_int_events(struct net_device *dev)
419{ 432{
420 struct fs_enet_private *fep = netdev_priv(dev); 433 struct fs_enet_private *fep = netdev_priv(dev);
421 fec_t __iomem *fecp = fep->fec.fecp; 434 struct fec __iomem *fecp = fep->fec.fecp;
422 435
423 return FR(fecp, ievent) & FR(fecp, imask); 436 return FR(fecp, ievent) & FR(fecp, imask);
424} 437}
@@ -426,32 +439,33 @@ static u32 get_int_events(struct net_device *dev)
426static void clear_int_events(struct net_device *dev, u32 int_events) 439static void clear_int_events(struct net_device *dev, u32 int_events)
427{ 440{
428 struct fs_enet_private *fep = netdev_priv(dev); 441 struct fs_enet_private *fep = netdev_priv(dev);
429 fec_t __iomem *fecp = fep->fec.fecp; 442 struct fec __iomem *fecp = fep->fec.fecp;
430 443
431 FW(fecp, ievent, int_events); 444 FW(fecp, ievent, int_events);
432} 445}
433 446
434static void ev_error(struct net_device *dev, u32 int_events) 447static void ev_error(struct net_device *dev, u32 int_events)
435{ 448{
436 printk(KERN_WARNING DRV_MODULE_NAME 449 struct fs_enet_private *fep = netdev_priv(dev);
437 ": %s FEC ERROR(s) 0x%x\n", dev->name, int_events); 450
451 dev_warn(fep->dev, "FEC ERROR(s) 0x%x\n", int_events);
438} 452}
439 453
440static int get_regs(struct net_device *dev, void *p, int *sizep) 454static int get_regs(struct net_device *dev, void *p, int *sizep)
441{ 455{
442 struct fs_enet_private *fep = netdev_priv(dev); 456 struct fs_enet_private *fep = netdev_priv(dev);
443 457
444 if (*sizep < sizeof(fec_t)) 458 if (*sizep < sizeof(struct fec))
445 return -EINVAL; 459 return -EINVAL;
446 460
447 memcpy_fromio(p, fep->fec.fecp, sizeof(fec_t)); 461 memcpy_fromio(p, fep->fec.fecp, sizeof(struct fec));
448 462
449 return 0; 463 return 0;
450} 464}
451 465
452static int get_regs_len(struct net_device *dev) 466static int get_regs_len(struct net_device *dev)
453{ 467{
454 return sizeof(fec_t); 468 return sizeof(struct fec);
455} 469}
456 470
457static void tx_restart(struct net_device *dev) 471static void tx_restart(struct net_device *dev)
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index 008cdd9cc536..c490a466cae1 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -213,7 +213,7 @@ static void set_multicast_finish(struct net_device *dev)
213 213
214 /* if all multi or too many multicasts; just enable all */ 214 /* if all multi or too many multicasts; just enable all */
215 if ((dev->flags & IFF_ALLMULTI) != 0 || 215 if ((dev->flags & IFF_ALLMULTI) != 0 ||
216 dev->mc_count > SCC_MAX_MULTICAST_ADDRS) { 216 netdev_mc_count(dev) > SCC_MAX_MULTICAST_ADDRS) {
217 217
218 W16(ep, sen_gaddr1, 0xffff); 218 W16(ep, sen_gaddr1, 0xffff);
219 W16(ep, sen_gaddr2, 0xffff); 219 W16(ep, sen_gaddr2, 0xffff);
@@ -228,7 +228,7 @@ static void set_multicast_list(struct net_device *dev)
228 228
229 if ((dev->flags & IFF_PROMISC) == 0) { 229 if ((dev->flags & IFF_PROMISC) == 0) {
230 set_multicast_start(dev); 230 set_multicast_start(dev);
231 for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next) 231 netdev_for_each_mc_addr(pmc, dev)
232 set_multicast_one(dev, pmc->dmi_addr); 232 set_multicast_one(dev, pmc->dmi_addr);
233 set_multicast_finish(dev); 233 set_multicast_finish(dev);
234 } else 234 } else
@@ -367,9 +367,7 @@ static void stop(struct net_device *dev)
367 udelay(1); 367 udelay(1);
368 368
369 if (i == SCC_RESET_DELAY) 369 if (i == SCC_RESET_DELAY)
370 printk(KERN_WARNING DRV_MODULE_NAME 370 dev_warn(fep->dev, "SCC timeout on graceful transmit stop\n");
371 ": %s SCC timeout on graceful transmit stop\n",
372 dev->name);
373 371
374 W16(sccp, scc_sccm, 0); 372 W16(sccp, scc_sccm, 0);
375 C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); 373 C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
@@ -429,8 +427,9 @@ static void clear_int_events(struct net_device *dev, u32 int_events)
429 427
430static void ev_error(struct net_device *dev, u32 int_events) 428static void ev_error(struct net_device *dev, u32 int_events)
431{ 429{
432 printk(KERN_WARNING DRV_MODULE_NAME 430 struct fs_enet_private *fep = netdev_priv(dev);
433 ": %s SCC ERROR(s) 0x%x\n", dev->name, int_events); 431
432 dev_warn(fep->dev, "SCC ERROR(s) 0x%x\n", int_events);
434} 433}
435 434
436static int get_regs(struct net_device *dev, void *p, int *sizep) 435static int get_regs(struct net_device *dev, void *p, int *sizep)
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index 96eba4280c5c..5944b65082cb 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -52,7 +52,7 @@
52static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location) 52static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location)
53{ 53{
54 struct fec_info* fec = bus->priv; 54 struct fec_info* fec = bus->priv;
55 fec_t __iomem *fecp = fec->fecp; 55 struct fec __iomem *fecp = fec->fecp;
56 int i, ret = -1; 56 int i, ret = -1;
57 57
58 BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0); 58 BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0);
@@ -75,7 +75,7 @@ static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location)
75static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val) 75static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val)
76{ 76{
77 struct fec_info* fec = bus->priv; 77 struct fec_info* fec = bus->priv;
78 fec_t __iomem *fecp = fec->fecp; 78 struct fec __iomem *fecp = fec->fecp;
79 int i; 79 int i;
80 80
81 /* this must never happen */ 81 /* this must never happen */
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 8bd3c9f17532..6aa526ee9096 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -2863,11 +2863,11 @@ static void gfar_set_multi(struct net_device *dev)
2863 em_num = 0; 2863 em_num = 0;
2864 } 2864 }
2865 2865
2866 if (dev->mc_count == 0) 2866 if (netdev_mc_empty(dev))
2867 return; 2867 return;
2868 2868
2869 /* Parse the list, and set the appropriate bits */ 2869 /* Parse the list, and set the appropriate bits */
2870 for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { 2870 netdev_for_each_mc_addr(mc_ptr, dev) {
2871 if (idx < em_num) { 2871 if (idx < em_num) {
2872 gfar_set_mac_for_addr(dev, idx, 2872 gfar_set_mac_for_addr(dev, idx,
2873 mc_ptr->dmi_addr); 2873 mc_ptr->dmi_addr);
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
new file mode 100644
index 000000000000..2b9c1cbc9ec1
--- /dev/null
+++ b/drivers/net/greth.c
@@ -0,0 +1,1634 @@
1/*
2 * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
3 *
4 * 2005-2009 (c) Aeroflex Gaisler AB
5 *
6 * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
7 * available in the GRLIB VHDL IP core library.
8 *
9 * Full documentation of both cores can be found here:
10 * http://www.gaisler.com/products/grlib/grip.pdf
11 *
12 * The Gigabit version supports scatter/gather DMA, any alignment of
13 * buffers and checksum offloading.
14 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
19 *
20 * Contributors: Kristoffer Glembo
21 * Daniel Hellstrom
22 * Marko Isomaki
23 */
24
25#include <linux/module.h>
26#include <linux/uaccess.h>
27#include <linux/init.h>
28#include <linux/netdevice.h>
29#include <linux/etherdevice.h>
30#include <linux/ethtool.h>
31#include <linux/skbuff.h>
32#include <linux/io.h>
33#include <linux/crc32.h>
34#include <linux/mii.h>
35#include <linux/of_device.h>
36#include <linux/of_platform.h>
37#include <asm/cacheflush.h>
38#include <asm/byteorder.h>
39
40#ifdef CONFIG_SPARC
41#include <asm/idprom.h>
42#endif
43
44#include "greth.h"
45
46#define GRETH_DEF_MSG_ENABLE \
47 (NETIF_MSG_DRV | \
48 NETIF_MSG_PROBE | \
49 NETIF_MSG_LINK | \
50 NETIF_MSG_IFDOWN | \
51 NETIF_MSG_IFUP | \
52 NETIF_MSG_RX_ERR | \
53 NETIF_MSG_TX_ERR)
54
55static int greth_debug = -1; /* -1 == use GRETH_DEF_MSG_ENABLE as value */
56module_param(greth_debug, int, 0);
57MODULE_PARM_DESC(greth_debug, "GRETH bitmapped debugging message enable value");
58
59/* Accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
60static int macaddr[6];
61module_param_array(macaddr, int, NULL, 0);
62MODULE_PARM_DESC(macaddr, "GRETH Ethernet MAC address");
63
64static int greth_edcl = 1;
65module_param(greth_edcl, int, 0);
66MODULE_PARM_DESC(greth_edcl, "GRETH EDCL usage indicator. Set to 1 if EDCL is used.");
67
68static int greth_open(struct net_device *dev);
69static netdev_tx_t greth_start_xmit(struct sk_buff *skb,
70 struct net_device *dev);
71static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb,
72 struct net_device *dev);
73static int greth_rx(struct net_device *dev, int limit);
74static int greth_rx_gbit(struct net_device *dev, int limit);
75static void greth_clean_tx(struct net_device *dev);
76static void greth_clean_tx_gbit(struct net_device *dev);
77static irqreturn_t greth_interrupt(int irq, void *dev_id);
78static int greth_close(struct net_device *dev);
79static int greth_set_mac_add(struct net_device *dev, void *p);
80static void greth_set_multicast_list(struct net_device *dev);
81
82#define GRETH_REGLOAD(a) (be32_to_cpu(__raw_readl(&(a))))
83#define GRETH_REGSAVE(a, v) (__raw_writel(cpu_to_be32(v), &(a)))
84#define GRETH_REGORIN(a, v) (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) | (v))))
85#define GRETH_REGANDIN(a, v) (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) & (v))))
86
87#define NEXT_TX(N) (((N) + 1) & GRETH_TXBD_NUM_MASK)
88#define SKIP_TX(N, C) (((N) + C) & GRETH_TXBD_NUM_MASK)
89#define NEXT_RX(N) (((N) + 1) & GRETH_RXBD_NUM_MASK)
90
91static void greth_print_rx_packet(void *addr, int len)
92{
93 print_hex_dump(KERN_DEBUG, "RX: ", DUMP_PREFIX_OFFSET, 16, 1,
94 addr, len, true);
95}
96
97static void greth_print_tx_packet(struct sk_buff *skb)
98{
99 int i;
100 int length;
101
102 if (skb_shinfo(skb)->nr_frags == 0)
103 length = skb->len;
104 else
105 length = skb_headlen(skb);
106
107 print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
108 skb->data, length, true);
109
110 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
111
112 print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
113 phys_to_virt(page_to_phys(skb_shinfo(skb)->frags[i].page)) +
114 skb_shinfo(skb)->frags[i].page_offset,
115 length, true);
116 }
117}
118
/* Set the TX-enable bit; wmb() first so all descriptor writes are visible
 * to the DMA engine before it is kicked. */
static inline void greth_enable_tx(struct greth_private *greth)
{
	wmb();
	GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
}
124
/* Clear the TX-enable bit in the control register. */
static inline void greth_disable_tx(struct greth_private *greth)
{
	GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
}
129
/* Set the RX-enable bit; wmb() first so refilled RX descriptors are visible
 * to the DMA engine before reception is (re)started. */
static inline void greth_enable_rx(struct greth_private *greth)
{
	wmb();
	GRETH_REGORIN(greth->regs->control, GRETH_RXEN);
}
135
/* Clear the RX-enable bit in the control register. */
static inline void greth_disable_rx(struct greth_private *greth)
{
	GRETH_REGANDIN(greth->regs->control, ~GRETH_RXEN);
}
140
/* Unmask both RX and TX interrupts. */
static inline void greth_enable_irqs(struct greth_private *greth)
{
	GRETH_REGORIN(greth->regs->control, GRETH_RXI | GRETH_TXI);
}
145
/* Mask both RX and TX interrupts (used while NAPI poll is scheduled). */
static inline void greth_disable_irqs(struct greth_private *greth)
{
	GRETH_REGANDIN(greth->regs->control, ~(GRETH_RXI|GRETH_TXI));
}
150
/* Store one buffer-descriptor word; descriptors live in big-endian DMA
 * memory, so convert from CPU order and use a raw (non-reordered) store. */
static inline void greth_write_bd(u32 *bd, u32 val)
{
	__raw_writel(cpu_to_be32(val), bd);
}
155
/* Load one buffer-descriptor word and convert from big-endian to CPU order. */
static inline u32 greth_read_bd(u32 *bd)
{
	return be32_to_cpu(__raw_readl(bd));
}
160
/*
 * Release every buffer still attached to the RX and TX descriptor rings.
 * Called on ifdown and on the error path of greth_init_rings(), so entries
 * may legitimately be NULL/partially set up.
 *
 * NOTE(review): the skb/buffer is freed before its DMA mapping is torn
 * down; the DMA API convention is unmap-then-free — presumably harmless on
 * the targeted platform, but worth confirming.
 */
static void greth_clean_rings(struct greth_private *greth)
{
	int i;
	struct greth_bd *rx_bdp = greth->rx_bd_base;
	struct greth_bd *tx_bdp = greth->tx_bd_base;

	if (greth->gbit_mac) {

		/* Free and unmap RX buffers */
		for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
			if (greth->rx_skbuff[i] != NULL) {
				dev_kfree_skb(greth->rx_skbuff[i]);
				dma_unmap_single(greth->dev,
						 greth_read_bd(&rx_bdp->addr),
						 MAX_FRAME_SIZE+NET_IP_ALIGN,
						 DMA_FROM_DEVICE);
			}
		}

		/* TX buffers: walk the in-flight window [tx_last, ...) until
		 * every descriptor (head + frags) is accounted for. */
		while (greth->tx_free < GRETH_TXBD_NUM) {

			struct sk_buff *skb = greth->tx_skbuff[greth->tx_last];
			int nr_frags = skb_shinfo(skb)->nr_frags;
			tx_bdp = greth->tx_bd_base + greth->tx_last;
			greth->tx_last = NEXT_TX(greth->tx_last);

			dma_unmap_single(greth->dev,
					 greth_read_bd(&tx_bdp->addr),
					 skb_headlen(skb),
					 DMA_TO_DEVICE);

			for (i = 0; i < nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
				tx_bdp = greth->tx_bd_base + greth->tx_last;

				dma_unmap_page(greth->dev,
					       greth_read_bd(&tx_bdp->addr),
					       frag->size,
					       DMA_TO_DEVICE);

				greth->tx_last = NEXT_TX(greth->tx_last);
			}
			/* One skb consumed 1 + nr_frags descriptors. */
			greth->tx_free += nr_frags+1;
			dev_kfree_skb(skb);
		}


	} else { /* 10/100 Mbps MAC */

		/* Non-gbit MAC uses fixed kmalloc'ed bounce buffers. */
		for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
			kfree(greth->rx_bufs[i]);
			dma_unmap_single(greth->dev,
					 greth_read_bd(&rx_bdp->addr),
					 MAX_FRAME_SIZE,
					 DMA_FROM_DEVICE);
		}
		for (i = 0; i < GRETH_TXBD_NUM; i++, tx_bdp++) {
			kfree(greth->tx_bufs[i]);
			dma_unmap_single(greth->dev,
					 greth_read_bd(&tx_bdp->addr),
					 MAX_FRAME_SIZE,
					 DMA_TO_DEVICE);
		}
	}
}
227
/*
 * Allocate and DMA-map the RX/TX rings and point the hardware at them.
 *
 * Gbit MAC: one pre-mapped skb per RX descriptor (zero-copy receive);
 * TX descriptors are mapped on demand at xmit time.
 * 10/100 MAC: fixed kmalloc'ed bounce buffers for both rings; data is
 * copied to/from skbs in the fast paths.
 *
 * Returns 0 or -ENOMEM; on failure everything allocated so far is released
 * via greth_clean_rings().
 */
static int greth_init_rings(struct greth_private *greth)
{
	struct sk_buff *skb;
	struct greth_bd *rx_bd, *tx_bd;
	u32 dma_addr;
	int i;

	rx_bd = greth->rx_bd_base;
	tx_bd = greth->tx_bd_base;

	/* Initialize descriptor rings and buffers */
	if (greth->gbit_mac) {

		for (i = 0; i < GRETH_RXBD_NUM; i++) {
			skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE+NET_IP_ALIGN);
			if (skb == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}
			skb_reserve(skb, NET_IP_ALIGN);
			dma_addr = dma_map_single(greth->dev,
						  skb->data,
						  MAX_FRAME_SIZE+NET_IP_ALIGN,
						  DMA_FROM_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth->rx_skbuff[i] = skb;
			greth_write_bd(&rx_bd[i].addr, dma_addr);
			/* Hand descriptor to hardware, interrupt on completion. */
			greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
		}

	} else {

		/* 10/100 MAC uses a fixed set of buffers and copy to/from SKBs */
		for (i = 0; i < GRETH_RXBD_NUM; i++) {

			greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);

			if (greth->rx_bufs[i] == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}

			dma_addr = dma_map_single(greth->dev,
						  greth->rx_bufs[i],
						  MAX_FRAME_SIZE,
						  DMA_FROM_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth_write_bd(&rx_bd[i].addr, dma_addr);
			greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
		}
		for (i = 0; i < GRETH_TXBD_NUM; i++) {

			greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);

			if (greth->tx_bufs[i] == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}

			dma_addr = dma_map_single(greth->dev,
						  greth->tx_bufs[i],
						  MAX_FRAME_SIZE,
						  DMA_TO_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth_write_bd(&tx_bd[i].addr, dma_addr);
			/* TX descriptors start owned by software (EN clear). */
			greth_write_bd(&tx_bd[i].stat, 0);
		}
	}
	/* Mark the last RX descriptor so the DMA engine wraps to index 0. */
	greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat,
		       greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR);

	/* Initialize pointers. */
	greth->rx_cur = 0;
	greth->tx_next = 0;
	greth->tx_last = 0;
	greth->tx_free = GRETH_TXBD_NUM;

	/* Initialize descriptor base address */
	GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
	GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);

	return 0;

cleanup:
	greth_clean_rings(greth);
	return -ENOMEM;
}
333
334static int greth_open(struct net_device *dev)
335{
336 struct greth_private *greth = netdev_priv(dev);
337 int err;
338
339 err = greth_init_rings(greth);
340 if (err) {
341 if (netif_msg_ifup(greth))
342 dev_err(&dev->dev, "Could not allocate memory for DMA rings\n");
343 return err;
344 }
345
346 err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev);
347 if (err) {
348 if (netif_msg_ifup(greth))
349 dev_err(&dev->dev, "Could not allocate interrupt %d\n", dev->irq);
350 greth_clean_rings(greth);
351 return err;
352 }
353
354 if (netif_msg_ifup(greth))
355 dev_dbg(&dev->dev, " starting queue\n");
356 netif_start_queue(dev);
357
358 napi_enable(&greth->napi);
359
360 greth_enable_irqs(greth);
361 greth_enable_tx(greth);
362 greth_enable_rx(greth);
363 return 0;
364
365}
366
/*
 * net_device_ops .ndo_stop: quiesce NAPI and TX, release the IRQ and free
 * all ring buffers.
 *
 * NOTE(review): only TX is disabled here — RX and the interrupt mask are
 * left as-is before the rings are torn down; presumably safe because the
 * IRQ is freed first, but worth confirming against the hardware manual.
 */
static int greth_close(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);

	napi_disable(&greth->napi);

	greth_disable_tx(greth);

	netif_stop_queue(dev);

	free_irq(greth->irq, (void *) dev);

	greth_clean_rings(greth);

	return 0;
}
383
/*
 * 10/100 MAC transmit path: copy the frame into the descriptor's fixed
 * pre-mapped bounce buffer, then hand the descriptor to hardware. The skb
 * is always consumed (freed) unless we return NETDEV_TX_BUSY.
 * Frames larger than MAX_FRAME_SIZE are dropped and counted as tx_errors.
 */
static netdev_tx_t
greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	int err = NETDEV_TX_OK;
	u32 status, dma_addr;

	bdp = greth->tx_bd_base + greth->tx_next;

	if (unlikely(greth->tx_free <= 0)) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (netif_msg_pktdata(greth))
		greth_print_tx_packet(skb);


	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
		dev->stats.tx_errors++;
		goto out;
	}

	/* Copy into the pre-mapped bounce buffer and flush it for the device. */
	dma_addr = greth_read_bd(&bdp->addr);

	memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);

	dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);

	status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN);

	/* Wrap around descriptor ring */
	if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
		status |= GRETH_BD_WR;
	}

	greth->tx_next = NEXT_TX(greth->tx_next);
	greth->tx_free--;

	/* No more descriptors */
	if (unlikely(greth->tx_free == 0)) {

		/* Free transmitted descriptors */
		greth_clean_tx(dev);

		/* If nothing was cleaned, stop queue & wait for irq */
		if (unlikely(greth->tx_free == 0)) {
			status |= GRETH_BD_IE;
			netif_stop_queue(dev);
		}
	}

	/* Write descriptor control word and enable transmission */
	greth_write_bd(&bdp->stat, status);
	greth_enable_tx(greth);

out:
	dev_kfree_skb(skb);
	return err;
}
445
446
/*
 * Gbit MAC transmit path: scatter/gather, zero-copy. The linear part and
 * each page fragment get their own descriptor; all descriptors are filled
 * first and only then enabled (EN set) in a second pass behind a wmb(),
 * so the hardware never sees a half-built chain.
 *
 * On a failed fragment mapping, every mapping made so far is rolled back
 * and the skb is dropped (NETDEV_TX_OK — the packet is consumed).
 */
static netdev_tx_t
greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	u32 status = 0, dma_addr;
	int curr_tx, nr_frags, i, err = NETDEV_TX_OK;

	nr_frags = skb_shinfo(skb)->nr_frags;

	/* Need one descriptor per fragment plus one for the linear part. */
	if (greth->tx_free < nr_frags + 1) {
		netif_stop_queue(dev);
		err = NETDEV_TX_BUSY;
		goto out;
	}

	if (netif_msg_pktdata(greth))
		greth_print_tx_packet(skb);

	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
		dev->stats.tx_errors++;
		goto out;
	}

	/* Save skb pointer. */
	greth->tx_skbuff[greth->tx_next] = skb;

	/* Linear buf */
	if (nr_frags != 0)
		status = GRETH_TXBD_MORE;

	/* Offload IP/TCP/UDP checksumming to the MAC. */
	status |= GRETH_TXBD_CSALL;
	status |= skb_headlen(skb) & GRETH_BD_LEN;
	if (greth->tx_next == GRETH_TXBD_NUM_MASK)
		status |= GRETH_BD_WR;


	bdp = greth->tx_bd_base + greth->tx_next;
	greth_write_bd(&bdp->stat, status);
	dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
		goto map_error;

	greth_write_bd(&bdp->addr, dma_addr);

	curr_tx = NEXT_TX(greth->tx_next);

	/* Frags */
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		greth->tx_skbuff[curr_tx] = NULL;
		bdp = greth->tx_bd_base + curr_tx;

		status = GRETH_TXBD_CSALL;
		status |= frag->size & GRETH_BD_LEN;

		/* Wrap around descriptor ring */
		if (curr_tx == GRETH_TXBD_NUM_MASK)
			status |= GRETH_BD_WR;

		/* More fragments left */
		if (i < nr_frags - 1)
			status |= GRETH_TXBD_MORE;

		/* ... last fragment, check if out of descriptors */
		else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) {

			/* Enable interrupts and stop queue */
			status |= GRETH_BD_IE;
			netif_stop_queue(dev);
		}

		greth_write_bd(&bdp->stat, status);

		dma_addr = dma_map_page(greth->dev,
					frag->page,
					frag->page_offset,
					frag->size,
					DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
			goto frag_map_error;

		greth_write_bd(&bdp->addr, dma_addr);

		curr_tx = NEXT_TX(curr_tx);
	}

	wmb();

	/* Enable the descriptors that we configured ... */
	for (i = 0; i < nr_frags + 1; i++) {
		bdp = greth->tx_bd_base + greth->tx_next;
		greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
		greth->tx_next = NEXT_TX(greth->tx_next);
		greth->tx_free--;
	}

	greth_enable_tx(greth);

	return NETDEV_TX_OK;

frag_map_error:
	/* Unmap SKB mappings that succeeded */
	for (i = 0; greth->tx_next + i != curr_tx; i++) {
		bdp = greth->tx_bd_base + greth->tx_next + i;
		dma_unmap_single(greth->dev,
				 greth_read_bd(&bdp->addr),
				 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
				 DMA_TO_DEVICE);
	}
map_error:
	if (net_ratelimit())
		dev_warn(greth->dev, "Could not create TX DMA mapping\n");
	dev_kfree_skb(skb);
out:
	return err;
}
566
567
/*
 * Interrupt handler: acknowledge RX/TX events, mask further interrupts and
 * defer all real work to NAPI poll. Returns IRQ_NONE when the status
 * register shows no RX/TX event (shared-IRQ friendly).
 */
static irqreturn_t greth_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct greth_private *greth;
	u32 status;
	irqreturn_t retval = IRQ_NONE;

	greth = netdev_priv(dev);

	spin_lock(&greth->devlock);

	/* Get the interrupt events that caused us to be here. */
	status = GRETH_REGLOAD(greth->regs->status);

	/* Handle rx and tx interrupts through poll */
	if (status & (GRETH_INT_RX | GRETH_INT_TX)) {

		/* Clear interrupt status */
		GRETH_REGORIN(greth->regs->status,
			      status & (GRETH_INT_RX | GRETH_INT_TX));

		retval = IRQ_HANDLED;

		/* Disable interrupts and schedule poll() */
		greth_disable_irqs(greth);
		napi_schedule(&greth->napi);
	}

	/* Order MMIO writes before dropping the lock. */
	mmiowb();
	spin_unlock(&greth->devlock);

	return retval;
}
601
/*
 * Reclaim completed 10/100 TX descriptors (buffers are fixed, so only the
 * ring bookkeeping and stats are updated) and wake the queue when at
 * least one descriptor is free.
 */
static void greth_clean_tx(struct net_device *dev)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	u32 stat;

	greth = netdev_priv(dev);

	while (1) {
		bdp = greth->tx_bd_base + greth->tx_last;
		stat = greth_read_bd(&bdp->stat);

		/* EN still set: hardware hasn't finished this descriptor. */
		if (unlikely(stat & GRETH_BD_EN))
			break;

		/* Ring fully reclaimed. */
		if (greth->tx_free == GRETH_TXBD_NUM)
			break;

		/* Check status for errors */
		if (unlikely(stat & GRETH_TXBD_STATUS)) {
			dev->stats.tx_errors++;
			if (stat & GRETH_TXBD_ERR_AL)
				dev->stats.tx_aborted_errors++;
			if (stat & GRETH_TXBD_ERR_UE)
				dev->stats.tx_fifo_errors++;
		}
		dev->stats.tx_packets++;
		greth->tx_last = NEXT_TX(greth->tx_last);
		greth->tx_free++;
	}

	if (greth->tx_free > 0) {
		netif_wake_queue(dev);
	}

}
638
639static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
640{
641 /* Check status for errors */
642 if (unlikely(stat & GRETH_TXBD_STATUS)) {
643 dev->stats.tx_errors++;
644 if (stat & GRETH_TXBD_ERR_AL)
645 dev->stats.tx_aborted_errors++;
646 if (stat & GRETH_TXBD_ERR_UE)
647 dev->stats.tx_fifo_errors++;
648 if (stat & GRETH_TXBD_ERR_LC)
649 dev->stats.tx_aborted_errors++;
650 }
651 dev->stats.tx_packets++;
652}
653
654static void greth_clean_tx_gbit(struct net_device *dev)
655{
656 struct greth_private *greth;
657 struct greth_bd *bdp, *bdp_last_frag;
658 struct sk_buff *skb;
659 u32 stat;
660 int nr_frags, i;
661
662 greth = netdev_priv(dev);
663
664 while (greth->tx_free < GRETH_TXBD_NUM) {
665
666 skb = greth->tx_skbuff[greth->tx_last];
667
668 nr_frags = skb_shinfo(skb)->nr_frags;
669
670 /* We only clean fully completed SKBs */
671 bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
672 stat = bdp_last_frag->stat;
673
674 if (stat & GRETH_BD_EN)
675 break;
676
677 greth->tx_skbuff[greth->tx_last] = NULL;
678
679 greth_update_tx_stats(dev, stat);
680
681 bdp = greth->tx_bd_base + greth->tx_last;
682
683 greth->tx_last = NEXT_TX(greth->tx_last);
684
685 dma_unmap_single(greth->dev,
686 greth_read_bd(&bdp->addr),
687 skb_headlen(skb),
688 DMA_TO_DEVICE);
689
690 for (i = 0; i < nr_frags; i++) {
691 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
692 bdp = greth->tx_bd_base + greth->tx_last;
693
694 dma_unmap_page(greth->dev,
695 greth_read_bd(&bdp->addr),
696 frag->size,
697 DMA_TO_DEVICE);
698
699 greth->tx_last = NEXT_TX(greth->tx_last);
700 }
701 greth->tx_free += nr_frags+1;
702 dev_kfree_skb(skb);
703 }
704 if (greth->tx_free > (MAX_SKB_FRAGS + 1)) {
705 netif_wake_queue(dev);
706 }
707}
708
709static int greth_pending_packets(struct greth_private *greth)
710{
711 struct greth_bd *bdp;
712 u32 status;
713 bdp = greth->rx_bd_base + greth->rx_cur;
714 status = greth_read_bd(&bdp->stat);
715 if (status & GRETH_BD_EN)
716 return 0;
717 else
718 return 1;
719}
720
/*
 * 10/100 MAC receive path: frames arrive in fixed pre-mapped bounce
 * buffers and are copied into freshly allocated skbs. Processes at most
 * 'limit' descriptors; returns the number handled (NAPI budget
 * accounting). Each descriptor is re-armed (EN|IE) before moving on.
 */
static int greth_rx(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb;
	int pkt_len;
	int bad, count;
	u32 status, dma_addr;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		status = greth_read_bd(&bdp->stat);
		dma_addr = greth_read_bd(&bdp->addr);
		bad = 0;

		/* EN still set: hardware owns this descriptor — nothing more. */
		if (unlikely(status & GRETH_BD_EN)) {
			break;
		}

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {
			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			}
			if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			}
			if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}
		if (unlikely(bad)) {
			dev->stats.rx_errors++;

		} else {

			pkt_len = status & GRETH_BD_LEN;

			skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);

			if (unlikely(skb == NULL)) {

				if (net_ratelimit())
					dev_warn(&dev->dev, "low on memory - " "packet dropped\n");

				dev->stats.rx_dropped++;

			} else {
				skb_reserve(skb, NET_IP_ALIGN);
				skb->dev = dev;

				/* Give the CPU a coherent view before copying out. */
				dma_sync_single_for_cpu(greth->dev,
							dma_addr,
							pkt_len,
							DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);

				memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				netif_receive_skb(skb);
			}
		}

		/* Re-arm the descriptor; keep the wrap bit on the last one. */
		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
			status |= GRETH_BD_WR;
		}

		wmb();
		greth_write_bd(&bdp->stat, status);

		dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);

		greth_enable_rx(greth);

		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}
811
812static inline int hw_checksummed(u32 status)
813{
814
815 if (status & GRETH_RXBD_IP_FRAG)
816 return 0;
817
818 if (status & GRETH_RXBD_IP && status & GRETH_RXBD_IP_CSERR)
819 return 0;
820
821 if (status & GRETH_RXBD_UDP && status & GRETH_RXBD_UDP_CSERR)
822 return 0;
823
824 if (status & GRETH_RXBD_TCP && status & GRETH_RXBD_TCP_CSERR)
825 return 0;
826
827 return 1;
828}
829
/*
 * Gbit MAC receive path: zero-copy. Each good frame's skb is handed
 * straight up the stack and replaced in the ring by a newly allocated,
 * newly mapped skb. If allocation or mapping fails, the frame is dropped
 * and the old skb stays in the ring. Processes at most 'limit'
 * descriptors; returns the number handled.
 */
static int greth_rx_gbit(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb, *newskb;
	int pkt_len;
	int bad, count = 0;
	u32 status, dma_addr;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		skb = greth->rx_skbuff[greth->rx_cur];
		status = greth_read_bd(&bdp->stat);
		bad = 0;

		/* Hardware still owns this descriptor — done for now. */
		if (status & GRETH_BD_EN)
			break;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {

			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			} else if (status &
				   (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			} else if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}

		/* Allocate new skb to replace current */
		newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN);

		if (!bad && newskb) {
			skb_reserve(newskb, NET_IP_ALIGN);

			dma_addr = dma_map_single(greth->dev,
						      newskb->data,
						      MAX_FRAME_SIZE + NET_IP_ALIGN,
						      DMA_FROM_DEVICE);

			if (!dma_mapping_error(greth->dev, dma_addr)) {
				/* Process the incoming frame. */
				pkt_len = status & GRETH_BD_LEN;

				dma_unmap_single(greth->dev,
						 greth_read_bd(&bdp->addr),
						 MAX_FRAME_SIZE + NET_IP_ALIGN,
						 DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);

				skb_put(skb, pkt_len);

				/* Honour the RX-csum-offload toggle per packet. */
				if (greth->flags & GRETH_FLAG_RX_CSUM && hw_checksummed(status))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb->ip_summed = CHECKSUM_NONE;

				skb->dev = dev;
				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				netif_receive_skb(skb);

				greth->rx_skbuff[greth->rx_cur] = newskb;
				greth_write_bd(&bdp->addr, dma_addr);
			} else {
				if (net_ratelimit())
					dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
				dev_kfree_skb(newskb);
				dev->stats.rx_dropped++;
			}
		} else {
			if (net_ratelimit())
				dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
			dev->stats.rx_dropped++;
		}

		/* Re-arm the descriptor; keep the wrap bit on the last one. */
		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
			status |= GRETH_BD_WR;
		}

		wmb();
		greth_write_bd(&bdp->stat, status);
		greth_enable_rx(greth);
		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;

}
930
/*
 * NAPI poll: reclaim TX descriptors, then receive up to 'budget' frames.
 * After napi_complete() we re-check for frames that raced in and
 * reschedule if needed, so no packet is left behind with interrupts still
 * masked. Interrupts are re-enabled on exit.
 */
static int greth_poll(struct napi_struct *napi, int budget)
{
	struct greth_private *greth;
	int work_done = 0;
	greth = container_of(napi, struct greth_private, napi);

	if (greth->gbit_mac) {
		greth_clean_tx_gbit(greth->netdev);
	} else {
		greth_clean_tx(greth->netdev);
	}

restart_poll:
	if (greth->gbit_mac) {
		work_done += greth_rx_gbit(greth->netdev, budget - work_done);
	} else {
		work_done += greth_rx(greth->netdev, budget - work_done);
	}

	if (work_done < budget) {

		napi_complete(napi);

		/* Close the race between napi_complete and a new frame. */
		if (greth_pending_packets(greth)) {
			napi_reschedule(napi);
			goto restart_poll;
		}
	}

	greth_enable_irqs(greth);
	return work_done;
}
963
/*
 * net_device_ops .ndo_set_mac_address: validate the new address, store it
 * in dev->dev_addr and program it into the MAC's ESA register pair
 * (bytes 0-1 in the MSB register, bytes 2-5 in the LSB register).
 * Returns 0 or -EINVAL for an invalid Ethernet address.
 */
static int greth_set_mac_add(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct greth_private *greth;
	struct greth_regs *regs;

	greth = netdev_priv(dev);
	regs = (struct greth_regs *) greth->regs;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	GRETH_REGSAVE(regs->esa_msb, addr->sa_data[0] << 8 | addr->sa_data[1]);
	GRETH_REGSAVE(regs->esa_lsb,
		      addr->sa_data[2] << 24 | addr->
		      sa_data[3] << 16 | addr->sa_data[4] << 8 | addr->sa_data[5]);
	return 0;
}
984
985static u32 greth_hash_get_index(__u8 *addr)
986{
987 return (ether_crc(6, addr)) & 0x3F;
988}
989
990static void greth_set_hash_filter(struct net_device *dev)
991{
992 struct dev_mc_list *curr;
993 struct greth_private *greth = netdev_priv(dev);
994 struct greth_regs *regs = (struct greth_regs *) greth->regs;
995 u32 mc_filter[2];
996 unsigned int bitnr;
997
998 mc_filter[0] = mc_filter[1] = 0;
999
1000 netdev_for_each_mc_addr(curr, dev) {
1001 bitnr = greth_hash_get_index(curr->dmi_addr);
1002 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
1003 }
1004
1005 GRETH_REGSAVE(regs->hash_msb, mc_filter[1]);
1006 GRETH_REGSAVE(regs->hash_lsb, mc_filter[0]);
1007}
1008
/*
 * Configure RX filtering from dev->flags and the multicast list:
 * promiscuous bit, all-multi (hash table all-ones), empty list (multicast
 * disabled), or a proper hash filter. Only touches the hash hardware when
 * the core advertises multicast support (greth->multicast).
 */
static void greth_set_multicast_list(struct net_device *dev)
{
	int cfg;
	struct greth_private *greth = netdev_priv(dev);
	struct greth_regs *regs = (struct greth_regs *) greth->regs;

	cfg = GRETH_REGLOAD(regs->control);
	if (dev->flags & IFF_PROMISC)
		cfg |= GRETH_CTRL_PR;
	else
		cfg &= ~GRETH_CTRL_PR;

	if (greth->multicast) {
		if (dev->flags & IFF_ALLMULTI) {
			/* Accept all multicast: saturate the hash table. */
			GRETH_REGSAVE(regs->hash_msb, -1);
			GRETH_REGSAVE(regs->hash_lsb, -1);
			cfg |= GRETH_CTRL_MCEN;
			GRETH_REGSAVE(regs->control, cfg);
			return;
		}

		if (netdev_mc_empty(dev)) {
			cfg &= ~GRETH_CTRL_MCEN;
			GRETH_REGSAVE(regs->control, cfg);
			return;
		}

		/* Setup multicast filter */
		greth_set_hash_filter(dev);
		cfg |= GRETH_CTRL_MCEN;
	}
	GRETH_REGSAVE(regs->control, cfg);
}
1042
1043static u32 greth_get_msglevel(struct net_device *dev)
1044{
1045 struct greth_private *greth = netdev_priv(dev);
1046 return greth->msg_enable;
1047}
1048
1049static void greth_set_msglevel(struct net_device *dev, u32 value)
1050{
1051 struct greth_private *greth = netdev_priv(dev);
1052 greth->msg_enable = value;
1053}
1054static int greth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1055{
1056 struct greth_private *greth = netdev_priv(dev);
1057 struct phy_device *phy = greth->phy;
1058
1059 if (!phy)
1060 return -ENODEV;
1061
1062 return phy_ethtool_gset(phy, cmd);
1063}
1064
1065static int greth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1066{
1067 struct greth_private *greth = netdev_priv(dev);
1068 struct phy_device *phy = greth->phy;
1069
1070 if (!phy)
1071 return -ENODEV;
1072
1073 return phy_ethtool_sset(phy, cmd);
1074}
1075
1076static int greth_get_regs_len(struct net_device *dev)
1077{
1078 return sizeof(struct greth_regs);
1079}
1080
1081static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1082{
1083 struct greth_private *greth = netdev_priv(dev);
1084
1085 strncpy(info->driver, dev_driver_string(greth->dev), 32);
1086 strncpy(info->version, "revision: 1.0", 32);
1087 strncpy(info->bus_info, greth->dev->bus->name, 32);
1088 strncpy(info->fw_version, "N/A", 32);
1089 info->eedump_len = 0;
1090 info->regdump_len = sizeof(struct greth_regs);
1091}
1092
/*
 * ethtool: dump the whole register file word by word.
 * greth_read_bd() is reused here because it performs the same big-endian
 * __raw_readl access the memory-mapped registers require.
 */
static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
	int i;
	struct greth_private *greth = netdev_priv(dev);
	u32 __iomem *greth_regs = (u32 __iomem *) greth->regs;
	u32 *buff = p;

	for (i = 0; i < sizeof(struct greth_regs) / sizeof(u32); i++)
		buff[i] = greth_read_bd(&greth_regs[i]);
}
1103
1104static u32 greth_get_rx_csum(struct net_device *dev)
1105{
1106 struct greth_private *greth = netdev_priv(dev);
1107 return (greth->flags & GRETH_FLAG_RX_CSUM) != 0;
1108}
1109
1110static int greth_set_rx_csum(struct net_device *dev, u32 data)
1111{
1112 struct greth_private *greth = netdev_priv(dev);
1113
1114 spin_lock_bh(&greth->devlock);
1115
1116 if (data)
1117 greth->flags |= GRETH_FLAG_RX_CSUM;
1118 else
1119 greth->flags &= ~GRETH_FLAG_RX_CSUM;
1120
1121 spin_unlock_bh(&greth->devlock);
1122
1123 return 0;
1124}
1125
1126static u32 greth_get_tx_csum(struct net_device *dev)
1127{
1128 return (dev->features & NETIF_F_IP_CSUM) != 0;
1129}
1130
1131static int greth_set_tx_csum(struct net_device *dev, u32 data)
1132{
1133 netif_tx_lock_bh(dev);
1134 ethtool_op_set_tx_csum(dev, data);
1135 netif_tx_unlock_bh(dev);
1136 return 0;
1137}
1138
/* ethtool entry points for this driver. */
static const struct ethtool_ops greth_ethtool_ops = {
	.get_msglevel		= greth_get_msglevel,
	.set_msglevel		= greth_set_msglevel,
	.get_settings		= greth_get_settings,
	.set_settings		= greth_set_settings,
	.get_drvinfo		= greth_get_drvinfo,
	.get_regs_len           = greth_get_regs_len,
	.get_regs               = greth_get_regs,
	.get_rx_csum		= greth_get_rx_csum,
	.set_rx_csum		= greth_set_rx_csum,
	.get_tx_csum		= greth_get_tx_csum,
	.set_tx_csum		= greth_set_tx_csum,
	.get_link		= ethtool_op_get_link,
};
1153
/* net_device_ops table. Deliberately non-const: .ndo_start_xmit defaults to
 * the 10/100 path — presumably swapped to greth_start_xmit_gbit (and
 * greth_set_multicast_list hooked up) at probe time when the hardware
 * supports it; not visible in this chunk — TODO confirm. */
static struct net_device_ops greth_netdev_ops = {
	.ndo_open = greth_open,
	.ndo_stop = greth_close,
	.ndo_start_xmit = greth_start_xmit,
	.ndo_set_mac_address = greth_set_mac_add,
	.ndo_validate_addr 	= eth_validate_addr,
};
1161
/* Busy-wait until the MDIO controller is idle. Returns 1 on idle, 0 on
 * timeout (4*HZ/100 jiffies, i.e. ~40 ms). */
static inline int wait_for_mdio(struct greth_private *greth)
{
	unsigned long timeout = jiffies + 4*HZ/100;
	while (GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_BUSY) {
		if (time_after(jiffies, timeout))
			return 0;
	}
	return 1;
}
1171
/*
 * mii_bus .read: issue an MDIO read (command 2) for phy/reg and return the
 * 16-bit data, -EBUSY if the controller stays busy, or -1 when the
 * controller flags the result as not valid (no PHY response).
 */
static int greth_mdio_read(struct mii_bus *bus, int phy, int reg)
{
	struct greth_private *greth = bus->priv;
	int data;

	if (!wait_for_mdio(greth))
		return -EBUSY;

	GRETH_REGSAVE(greth->regs->mdio, ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 2);

	if (!wait_for_mdio(greth))
		return -EBUSY;

	if (!(GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_NVALID)) {
		data = (GRETH_REGLOAD(greth->regs->mdio) >> 16) & 0xFFFF;
		return data;

	} else {
		return -1;
	}
}
1193
/*
 * mii_bus .write: issue an MDIO write (command 1) of val to phy/reg.
 * Returns 0 on success or -EBUSY if the controller stays busy.
 */
static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
	struct greth_private *greth = bus->priv;

	if (!wait_for_mdio(greth))
		return -EBUSY;

	GRETH_REGSAVE(greth->regs->mdio,
		      ((val & 0xFFFF) << 16) | ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 1);

	if (!wait_for_mdio(greth))
		return -EBUSY;

	return 0;
}
1209
/* mii_bus .reset: the GRETH MDIO controller needs no reset sequence. */
static int greth_mdio_reset(struct mii_bus *bus)
{
	return 0;
}
1214
/*
 * phylib adjust_link callback: mirror the PHY's negotiated duplex and
 * speed (10/100/1000) into the MAC control register, track link state in
 * the private struct and log transitions. Register updates happen under
 * the device lock; logging happens after it is dropped.
 */
static void greth_link_change(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct phy_device *phydev = greth->phy;
	unsigned long flags;

	int status_change = 0;

	spin_lock_irqsave(&greth->devlock, flags);

	if (phydev->link) {

		if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {

			/* Clear duplex/speed bits, then set per negotiation.
			 * Neither SP nor GB set means 10 Mbit. */
			GRETH_REGANDIN(greth->regs->control,
				       ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB));

			if (phydev->duplex)
				GRETH_REGORIN(greth->regs->control, GRETH_CTRL_FD);

			if (phydev->speed == SPEED_100) {

				GRETH_REGORIN(greth->regs->control, GRETH_CTRL_SP);
			}

			else if (phydev->speed == SPEED_1000)
				GRETH_REGORIN(greth->regs->control, GRETH_CTRL_GB);

			greth->speed = phydev->speed;
			greth->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != greth->link) {
		if (!phydev->link) {
			greth->speed = 0;
			greth->duplex = -1;
		}
		greth->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&greth->devlock, flags);

	if (status_change) {
		if (phydev->link)
			pr_debug("%s: link up (%d/%s)\n",
				dev->name, phydev->speed,
				DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
		else
			pr_debug("%s: link down\n", dev->name);
	}
}
1270
/* Find and attach the PHY for this MAC.
 *
 * Connects the first PHY found on the driver's private MDIO bus, using
 * greth_link_change() as the adjust_link callback.  Advertised features
 * are clamped to gigabit or basic depending on whether the MAC core was
 * detected as GBIT-capable.  Returns 0 or a negative errno. */
static int greth_mdio_probe(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct phy_device *phy = NULL;
	int ret;

	/* Find the first PHY */
	phy = phy_find_first(greth->mdio);

	if (!phy) {
		if (netif_msg_probe(greth))
			dev_err(&dev->dev, "no PHY found\n");
		return -ENXIO;
	}

	ret = phy_connect_direct(dev, phy, &greth_link_change,
			0, greth->gbit_mac ?
			PHY_INTERFACE_MODE_GMII :
			PHY_INTERFACE_MODE_MII);
	if (ret) {
		if (netif_msg_ifup(greth))
			dev_err(&dev->dev, "could not attach to PHY\n");
		return ret;
	}

	/* Limit advertising to what the MAC can actually do */
	if (greth->gbit_mac)
		phy->supported &= PHY_GBIT_FEATURES;
	else
		phy->supported &= PHY_BASIC_FEATURES;

	phy->advertising = phy->supported;

	/* Start from "no link" so the first adjust_link call syncs state */
	greth->link = 0;
	greth->speed = 0;
	greth->duplex = -1;
	greth->phy = phy;

	return 0;
}
1310
1311static inline int phy_aneg_done(struct phy_device *phydev)
1312{
1313 int retval;
1314
1315 retval = phy_read(phydev, MII_BMSR);
1316
1317 return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
1318}
1319
/* Allocate and register this device's MDIO bus, then attach the PHY.
 *
 * PHY interrupts are not used: every bus address is marked PHY_POLL.
 * When the Ethernet debug link (EDCL) is enabled, autonegotiation is
 * kicked off immediately and waited for (up to 6 s) so the debug link
 * becomes usable right away.  Returns 0 or a negative errno; on failure
 * the bus is unregistered/freed again. */
static int greth_mdio_init(struct greth_private *greth)
{
	int ret, phy;
	unsigned long timeout;

	greth->mdio = mdiobus_alloc();
	if (!greth->mdio) {
		return -ENOMEM;
	}

	greth->mdio->name = "greth-mdio";
	/* Bus id is made unique per device via the interrupt number */
	snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
	greth->mdio->read = greth_mdio_read;
	greth->mdio->write = greth_mdio_write;
	greth->mdio->reset = greth_mdio_reset;
	greth->mdio->priv = greth;

	greth->mdio->irq = greth->mdio_irqs;

	for (phy = 0; phy < PHY_MAX_ADDR; phy++)
		greth->mdio->irq[phy] = PHY_POLL;

	ret = mdiobus_register(greth->mdio);
	if (ret) {
		goto error;
	}

	ret = greth_mdio_probe(greth->netdev);
	if (ret) {
		if (netif_msg_probe(greth))
			dev_err(&greth->netdev->dev, "failed to probe MDIO bus\n");
		goto unreg_mdio;
	}

	phy_start(greth->phy);

	/* If Ethernet debug link is used make autoneg happen right away */
	if (greth->edcl && greth_edcl == 1) {
		phy_start_aneg(greth->phy);
		timeout = jiffies + 6*HZ;
		/* NOTE(review): tight busy-wait with no cpu_relax()/sleep —
		 * can occupy the CPU for up to 6 s during probe; confirm
		 * whether a sleeping wait is acceptable here */
		while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) {
		}
		genphy_read_status(greth->phy);
		greth_link_change(greth->netdev);
	}

	return 0;

unreg_mdio:
	mdiobus_unregister(greth->mdio);
error:
	mdiobus_free(greth->mdio);
	return ret;
}
1374
1375/* Initialize the GRETH MAC */
1376static int __devinit greth_of_probe(struct of_device *ofdev, const struct of_device_id *match)
1377{
1378 struct net_device *dev;
1379 struct greth_private *greth;
1380 struct greth_regs *regs;
1381
1382 int i;
1383 int err;
1384 int tmp;
1385 unsigned long timeout;
1386
1387 dev = alloc_etherdev(sizeof(struct greth_private));
1388
1389 if (dev == NULL)
1390 return -ENOMEM;
1391
1392 greth = netdev_priv(dev);
1393 greth->netdev = dev;
1394 greth->dev = &ofdev->dev;
1395
1396 if (greth_debug > 0)
1397 greth->msg_enable = greth_debug;
1398 else
1399 greth->msg_enable = GRETH_DEF_MSG_ENABLE;
1400
1401 spin_lock_init(&greth->devlock);
1402
1403 greth->regs = of_ioremap(&ofdev->resource[0], 0,
1404 resource_size(&ofdev->resource[0]),
1405 "grlib-greth regs");
1406
1407 if (greth->regs == NULL) {
1408 if (netif_msg_probe(greth))
1409 dev_err(greth->dev, "ioremap failure.\n");
1410 err = -EIO;
1411 goto error1;
1412 }
1413
1414 regs = (struct greth_regs *) greth->regs;
1415 greth->irq = ofdev->irqs[0];
1416
1417 dev_set_drvdata(greth->dev, dev);
1418 SET_NETDEV_DEV(dev, greth->dev);
1419
1420 if (netif_msg_probe(greth))
1421 dev_dbg(greth->dev, "reseting controller.\n");
1422
1423 /* Reset the controller. */
1424 GRETH_REGSAVE(regs->control, GRETH_RESET);
1425
1426 /* Wait for MAC to reset itself */
1427 timeout = jiffies + HZ/100;
1428 while (GRETH_REGLOAD(regs->control) & GRETH_RESET) {
1429 if (time_after(jiffies, timeout)) {
1430 err = -EIO;
1431 if (netif_msg_probe(greth))
1432 dev_err(greth->dev, "timeout when waiting for reset.\n");
1433 goto error2;
1434 }
1435 }
1436
1437 /* Get default PHY address */
1438 greth->phyaddr = (GRETH_REGLOAD(regs->mdio) >> 11) & 0x1F;
1439
1440 /* Check if we have GBIT capable MAC */
1441 tmp = GRETH_REGLOAD(regs->control);
1442 greth->gbit_mac = (tmp >> 27) & 1;
1443
1444 /* Check for multicast capability */
1445 greth->multicast = (tmp >> 25) & 1;
1446
1447 greth->edcl = (tmp >> 31) & 1;
1448
1449 /* If we have EDCL we disable the EDCL speed-duplex FSM so
1450 * it doesn't interfere with the software */
1451 if (greth->edcl != 0)
1452 GRETH_REGORIN(regs->control, GRETH_CTRL_DISDUPLEX);
1453
1454 /* Check if MAC can handle MDIO interrupts */
1455 greth->mdio_int_en = (tmp >> 26) & 1;
1456
1457 err = greth_mdio_init(greth);
1458 if (err) {
1459 if (netif_msg_probe(greth))
1460 dev_err(greth->dev, "failed to register MDIO bus\n");
1461 goto error2;
1462 }
1463
1464 /* Allocate TX descriptor ring in coherent memory */
1465 greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
1466 1024,
1467 &greth->tx_bd_base_phys,
1468 GFP_KERNEL);
1469
1470 if (!greth->tx_bd_base) {
1471 if (netif_msg_probe(greth))
1472 dev_err(&dev->dev, "could not allocate descriptor memory.\n");
1473 err = -ENOMEM;
1474 goto error3;
1475 }
1476
1477 memset(greth->tx_bd_base, 0, 1024);
1478
1479 /* Allocate RX descriptor ring in coherent memory */
1480 greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
1481 1024,
1482 &greth->rx_bd_base_phys,
1483 GFP_KERNEL);
1484
1485 if (!greth->rx_bd_base) {
1486 if (netif_msg_probe(greth))
1487 dev_err(greth->dev, "could not allocate descriptor memory.\n");
1488 err = -ENOMEM;
1489 goto error4;
1490 }
1491
1492 memset(greth->rx_bd_base, 0, 1024);
1493
1494 /* Get MAC address from: module param, OF property or ID prom */
1495 for (i = 0; i < 6; i++) {
1496 if (macaddr[i] != 0)
1497 break;
1498 }
1499 if (i == 6) {
1500 const unsigned char *addr;
1501 int len;
1502 addr = of_get_property(ofdev->node, "local-mac-address", &len);
1503 if (addr != NULL && len == 6) {
1504 for (i = 0; i < 6; i++)
1505 macaddr[i] = (unsigned int) addr[i];
1506 } else {
1507#ifdef CONFIG_SPARC
1508 for (i = 0; i < 6; i++)
1509 macaddr[i] = (unsigned int) idprom->id_ethaddr[i];
1510#endif
1511 }
1512 }
1513
1514 for (i = 0; i < 6; i++)
1515 dev->dev_addr[i] = macaddr[i];
1516
1517 macaddr[5]++;
1518
1519 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
1520 if (netif_msg_probe(greth))
1521 dev_err(greth->dev, "no valid ethernet address, aborting.\n");
1522 err = -EINVAL;
1523 goto error5;
1524 }
1525
1526 GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
1527 GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
1528 dev->dev_addr[4] << 8 | dev->dev_addr[5]);
1529
1530 /* Clear all pending interrupts except PHY irq */
1531 GRETH_REGSAVE(regs->status, 0xFF);
1532
1533 if (greth->gbit_mac) {
1534 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HIGHDMA;
1535 greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit;
1536 greth->flags = GRETH_FLAG_RX_CSUM;
1537 }
1538
1539 if (greth->multicast) {
1540 greth_netdev_ops.ndo_set_multicast_list = greth_set_multicast_list;
1541 dev->flags |= IFF_MULTICAST;
1542 } else {
1543 dev->flags &= ~IFF_MULTICAST;
1544 }
1545
1546 dev->netdev_ops = &greth_netdev_ops;
1547 dev->ethtool_ops = &greth_ethtool_ops;
1548
1549 if (register_netdev(dev)) {
1550 if (netif_msg_probe(greth))
1551 dev_err(greth->dev, "netdevice registration failed.\n");
1552 err = -ENOMEM;
1553 goto error5;
1554 }
1555
1556 /* setup NAPI */
1557 memset(&greth->napi, 0, sizeof(greth->napi));
1558 netif_napi_add(dev, &greth->napi, greth_poll, 64);
1559
1560 return 0;
1561
1562error5:
1563 dma_free_coherent(greth->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
1564error4:
1565 dma_free_coherent(greth->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
1566error3:
1567 mdiobus_unregister(greth->mdio);
1568error2:
1569 of_iounmap(&ofdev->resource[0], greth->regs, resource_size(&ofdev->resource[0]));
1570error1:
1571 free_netdev(dev);
1572 return err;
1573}
1574
1575static int __devexit greth_of_remove(struct of_device *of_dev)
1576{
1577 struct net_device *ndev = dev_get_drvdata(&of_dev->dev);
1578 struct greth_private *greth = netdev_priv(ndev);
1579
1580 /* Free descriptor areas */
1581 dma_free_coherent(&of_dev->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
1582
1583 dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
1584
1585 dev_set_drvdata(&of_dev->dev, NULL);
1586
1587 if (greth->phy)
1588 phy_stop(greth->phy);
1589 mdiobus_unregister(greth->mdio);
1590
1591 unregister_netdev(ndev);
1592 free_netdev(ndev);
1593
1594 of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));
1595
1596 return 0;
1597}
1598
/* OF match table: matched by device-tree/prom node name. */
static struct of_device_id greth_of_match[] = {
	{
		.name = "GAISLER_ETHMAC",
	},
	{},	/* sentinel */
};
1605
1606MODULE_DEVICE_TABLE(of, greth_of_match);
1607
/* OF/platform driver binding for the GRETH MAC. */
static struct of_platform_driver greth_of_driver = {
	.name = "grlib-greth",
	.match_table = greth_of_match,
	.probe = greth_of_probe,
	.remove = __devexit_p(greth_of_remove),	/* dropped when hotplug is off */
	.driver = {
		.owner = THIS_MODULE,
		.name = "grlib-greth",
	},
};
1618
/* Module entry point: register the OF platform driver. */
static int __init greth_init(void)
{
	return of_register_platform_driver(&greth_of_driver);
}
1623
/* Module exit point: unregister the OF platform driver. */
static void __exit greth_cleanup(void)
{
	of_unregister_platform_driver(&greth_of_driver);
}
1628
/* Module plumbing and metadata */
module_init(greth_init);
module_exit(greth_cleanup);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/greth.h b/drivers/net/greth.h
new file mode 100644
index 000000000000..973388d6abca
--- /dev/null
+++ b/drivers/net/greth.h
@@ -0,0 +1,143 @@
1#ifndef GRETH_H
2#define GRETH_H
3
4#include <linux/phy.h>
5
6/* Register bits and masks */
7#define GRETH_RESET 0x40
8#define GRETH_MII_BUSY 0x8
9#define GRETH_MII_NVALID 0x10
10
11#define GRETH_CTRL_FD 0x10
12#define GRETH_CTRL_PR 0x20
13#define GRETH_CTRL_SP 0x80
14#define GRETH_CTRL_GB 0x100
15#define GRETH_CTRL_PSTATIEN 0x400
16#define GRETH_CTRL_MCEN 0x800
17#define GRETH_CTRL_DISDUPLEX 0x1000
18#define GRETH_STATUS_PHYSTAT 0x100
19
20#define GRETH_BD_EN 0x800
21#define GRETH_BD_WR 0x1000
22#define GRETH_BD_IE 0x2000
23#define GRETH_BD_LEN 0x7FF
24
25#define GRETH_TXEN 0x1
26#define GRETH_INT_TX 0x8
27#define GRETH_TXI 0x4
28#define GRETH_TXBD_STATUS 0x0001C000
29#define GRETH_TXBD_MORE 0x20000
30#define GRETH_TXBD_IPCS 0x40000
31#define GRETH_TXBD_TCPCS 0x80000
32#define GRETH_TXBD_UDPCS 0x100000
33#define GRETH_TXBD_CSALL (GRETH_TXBD_IPCS | GRETH_TXBD_TCPCS | GRETH_TXBD_UDPCS)
34#define GRETH_TXBD_ERR_LC 0x10000
35#define GRETH_TXBD_ERR_UE 0x4000
36#define GRETH_TXBD_ERR_AL 0x8000
37
38#define GRETH_INT_RX 0x4
39#define GRETH_RXEN 0x2
40#define GRETH_RXI 0x8
41#define GRETH_RXBD_STATUS 0xFFFFC000
42#define GRETH_RXBD_ERR_AE 0x4000
43#define GRETH_RXBD_ERR_FT 0x8000
44#define GRETH_RXBD_ERR_CRC 0x10000
45#define GRETH_RXBD_ERR_OE 0x20000
46#define GRETH_RXBD_ERR_LE 0x40000
47#define GRETH_RXBD_IP 0x80000
48#define GRETH_RXBD_IP_CSERR 0x100000
49#define GRETH_RXBD_UDP 0x200000
50#define GRETH_RXBD_UDP_CSERR 0x400000
51#define GRETH_RXBD_TCP 0x800000
52#define GRETH_RXBD_TCP_CSERR 0x1000000
53#define GRETH_RXBD_IP_FRAG 0x2000000
54#define GRETH_RXBD_MCAST 0x4000000
55
56/* Descriptor parameters */
57#define GRETH_TXBD_NUM 128
58#define GRETH_TXBD_NUM_MASK (GRETH_TXBD_NUM-1)
59#define GRETH_TX_BUF_SIZE 2048
60#define GRETH_RXBD_NUM 128
61#define GRETH_RXBD_NUM_MASK (GRETH_RXBD_NUM-1)
62#define GRETH_RX_BUF_SIZE 2048
63
64/* Buffers per page */
65#define GRETH_RX_BUF_PPGAE (PAGE_SIZE/GRETH_RX_BUF_SIZE)
66#define GRETH_TX_BUF_PPGAE (PAGE_SIZE/GRETH_TX_BUF_SIZE)
67
68/* How many pages are needed for buffers */
69#define GRETH_RX_BUF_PAGE_NUM (GRETH_RXBD_NUM/GRETH_RX_BUF_PPGAE)
70#define GRETH_TX_BUF_PAGE_NUM (GRETH_TXBD_NUM/GRETH_TX_BUF_PPGAE)
71
72/* Buffer size.
73 * Gbit MAC uses tagged maximum frame size which is 1518 excluding CRC.
74 * Set to 1520 to make all buffers word aligned for non-gbit MAC.
75 */
76#define MAX_FRAME_SIZE 1520
77
78/* Flags */
79#define GRETH_FLAG_RX_CSUM 0x1
80
81/* GRETH APB registers */
82struct greth_regs {
83 u32 control;
84 u32 status;
85 u32 esa_msb;
86 u32 esa_lsb;
87 u32 mdio;
88 u32 tx_desc_p;
89 u32 rx_desc_p;
90 u32 edclip;
91 u32 hash_msb;
92 u32 hash_lsb;
93};
94
95/* GRETH buffer descriptor */
96struct greth_bd {
97 u32 stat;
98 u32 addr;
99};
100
101struct greth_private {
102 struct sk_buff *rx_skbuff[GRETH_RXBD_NUM];
103 struct sk_buff *tx_skbuff[GRETH_TXBD_NUM];
104
105 unsigned char *tx_bufs[GRETH_TXBD_NUM];
106 unsigned char *rx_bufs[GRETH_RXBD_NUM];
107
108 u16 tx_next;
109 u16 tx_last;
110 u16 tx_free;
111 u16 rx_cur;
112
113 struct greth_regs *regs; /* Address of controller registers. */
114 struct greth_bd *rx_bd_base; /* Address of Rx BDs. */
115 struct greth_bd *tx_bd_base; /* Address of Tx BDs. */
116 dma_addr_t rx_bd_base_phys;
117 dma_addr_t tx_bd_base_phys;
118
119 int irq;
120
121 struct device *dev; /* Pointer to of_device->dev */
122 struct net_device *netdev;
123 struct napi_struct napi;
124 spinlock_t devlock;
125
126 struct phy_device *phy;
127 struct mii_bus *mdio;
128 int mdio_irqs[PHY_MAX_ADDR];
129 unsigned int link;
130 unsigned int speed;
131 unsigned int duplex;
132
133 u32 msg_enable;
134 u32 flags;
135
136 u8 phyaddr;
137 u8 multicast;
138 u8 gbit_mac;
139 u8 mdio_int_en;
140 u8 edcl;
141};
142
143#endif
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index ea85075a89a2..373546dd0831 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1854,17 +1854,18 @@ static void set_rx_mode(struct net_device *dev)
1854 1854
1855 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1855 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1856 writew(0x000F, ioaddr + AddrMode); 1856 writew(0x000F, ioaddr + AddrMode);
1857 } else if ((dev->mc_count > 63) || (dev->flags & IFF_ALLMULTI)) { 1857 } else if ((netdev_mc_count(dev) > 63) || (dev->flags & IFF_ALLMULTI)) {
1858 /* Too many to match, or accept all multicasts. */ 1858 /* Too many to match, or accept all multicasts. */
1859 writew(0x000B, ioaddr + AddrMode); 1859 writew(0x000B, ioaddr + AddrMode);
1860 } else if (dev->mc_count > 0) { /* Must use the CAM filter. */ 1860 } else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */
1861 struct dev_mc_list *mclist; 1861 struct dev_mc_list *mclist;
1862 int i; 1862 int i = 0;
1863 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 1863
1864 i++, mclist = mclist->next) { 1864 netdev_for_each_mc_addr(mclist, dev) {
1865 writel(*(u32*)(mclist->dmi_addr), ioaddr + 0x100 + i*8); 1865 writel(*(u32*)(mclist->dmi_addr), ioaddr + 0x100 + i*8);
1866 writel(0x20000 | (*(u16*)&mclist->dmi_addr[4]), 1866 writel(0x20000 | (*(u16*)&mclist->dmi_addr[4]),
1867 ioaddr + 0x104 + i*8); 1867 ioaddr + 0x104 + i*8);
1868 i++;
1868 } 1869 }
1869 /* Clear remaining entries. */ 1870 /* Clear remaining entries. */
1870 for (; i < 64; i++) 1871 for (; i < 64; i++)
@@ -1990,7 +1991,7 @@ static void __devexit hamachi_remove_one (struct pci_dev *pdev)
1990 } 1991 }
1991} 1992}
1992 1993
1993static struct pci_device_id hamachi_pci_tbl[] = { 1994static DEFINE_PCI_DEVICE_TABLE(hamachi_pci_tbl) = {
1994 { 0x1318, 0x0911, PCI_ANY_ID, PCI_ANY_ID, }, 1995 { 0x1318, 0x0911, PCI_ANY_ID, PCI_ANY_ID, },
1995 { 0, } 1996 { 0, }
1996}; 1997};
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 90f890e7c5e1..b766a69bf0ca 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -210,7 +210,7 @@ MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl);
210#endif 210#endif
211 211
212#ifdef CONFIG_PCI 212#ifdef CONFIG_PCI
213static struct pci_device_id hp100_pci_tbl[] = { 213static DEFINE_PCI_DEVICE_TABLE(hp100_pci_tbl) = {
214 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,}, 214 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,},
215 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,}, 215 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,},
216 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2970A, PCI_ANY_ID, PCI_ANY_ID,}, 216 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2970A, PCI_ANY_ID, PCI_ANY_ID,},
@@ -2090,7 +2090,7 @@ static void hp100_set_multicast_list(struct net_device *dev)
2090 lp->mac2_mode = HP100_MAC2MODE6; /* promiscuous mode = get all good */ 2090 lp->mac2_mode = HP100_MAC2MODE6; /* promiscuous mode = get all good */
2091 lp->mac1_mode = HP100_MAC1MODE6; /* packets on the net */ 2091 lp->mac1_mode = HP100_MAC1MODE6; /* packets on the net */
2092 memset(&lp->hash_bytes, 0xff, 8); 2092 memset(&lp->hash_bytes, 0xff, 8);
2093 } else if (dev->mc_count || (dev->flags & IFF_ALLMULTI)) { 2093 } else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) {
2094 lp->mac2_mode = HP100_MAC2MODE5; /* multicast mode = get packets for */ 2094 lp->mac2_mode = HP100_MAC2MODE5; /* multicast mode = get packets for */
2095 lp->mac1_mode = HP100_MAC1MODE5; /* me, broadcasts and all multicasts */ 2095 lp->mac1_mode = HP100_MAC1MODE5; /* me, broadcasts and all multicasts */
2096#ifdef HP100_MULTICAST_FILTER /* doesn't work!!! */ 2096#ifdef HP100_MULTICAST_FILTER /* doesn't work!!! */
@@ -2098,22 +2098,23 @@ static void hp100_set_multicast_list(struct net_device *dev)
2098 /* set hash filter to receive all multicast packets */ 2098 /* set hash filter to receive all multicast packets */
2099 memset(&lp->hash_bytes, 0xff, 8); 2099 memset(&lp->hash_bytes, 0xff, 8);
2100 } else { 2100 } else {
2101 int i, j, idx; 2101 int i, idx;
2102 u_char *addrs; 2102 u_char *addrs;
2103 struct dev_mc_list *dmi; 2103 struct dev_mc_list *dmi;
2104 2104
2105 memset(&lp->hash_bytes, 0x00, 8); 2105 memset(&lp->hash_bytes, 0x00, 8);
2106#ifdef HP100_DEBUG 2106#ifdef HP100_DEBUG
2107 printk("hp100: %s: computing hash filter - mc_count = %i\n", dev->name, dev->mc_count); 2107 printk("hp100: %s: computing hash filter - mc_count = %i\n",
2108 dev->name, netdev_mc_count(dev));
2108#endif 2109#endif
2109 for (i = 0, dmi = dev->mc_list; i < dev->mc_count; i++, dmi = dmi->next) { 2110 netdev_for_each_mc_addr(dmi, dev) {
2110 addrs = dmi->dmi_addr; 2111 addrs = dmi->dmi_addr;
2111 if ((*addrs & 0x01) == 0x01) { /* multicast address? */ 2112 if ((*addrs & 0x01) == 0x01) { /* multicast address? */
2112#ifdef HP100_DEBUG 2113#ifdef HP100_DEBUG
2113 printk("hp100: %s: multicast = %pM, ", 2114 printk("hp100: %s: multicast = %pM, ",
2114 dev->name, addrs); 2115 dev->name, addrs);
2115#endif 2116#endif
2116 for (j = idx = 0; j < 6; j++) { 2117 for (i = idx = 0; i < 6; i++) {
2117 idx ^= *addrs++ & 0x3f; 2118 idx ^= *addrs++ & 0x3f;
2118 printk(":%02x:", idx); 2119 printk(":%02x:", idx);
2119 } 2120 }
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index fb5e019169ee..fb0ac6d7c040 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -391,11 +391,11 @@ static void emac_hash_mc(struct emac_instance *dev)
391 struct dev_mc_list *dmi; 391 struct dev_mc_list *dmi;
392 int i; 392 int i;
393 393
394 DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count); 394 DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
395 395
396 memset(gaht_temp, 0, sizeof (gaht_temp)); 396 memset(gaht_temp, 0, sizeof (gaht_temp));
397 397
398 for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) { 398 netdev_for_each_mc_addr(dmi, dev->ndev) {
399 int slot, reg, mask; 399 int slot, reg, mask;
400 DBG2(dev, "mc %pM" NL, dmi->dmi_addr); 400 DBG2(dev, "mc %pM" NL, dmi->dmi_addr);
401 401
@@ -425,9 +425,9 @@ static inline u32 emac_iff2rmr(struct net_device *ndev)
425 if (ndev->flags & IFF_PROMISC) 425 if (ndev->flags & IFF_PROMISC)
426 r |= EMAC_RMR_PME; 426 r |= EMAC_RMR_PME;
427 else if (ndev->flags & IFF_ALLMULTI || 427 else if (ndev->flags & IFF_ALLMULTI ||
428 (ndev->mc_count > EMAC_XAHT_SLOTS(dev))) 428 (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
429 r |= EMAC_RMR_PMME; 429 r |= EMAC_RMR_PMME;
430 else if (ndev->mc_count > 0) 430 else if (!netdev_mc_empty(ndev))
431 r |= EMAC_RMR_MAE; 431 r |= EMAC_RMR_MAE;
432 432
433 return r; 433 return r;
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 052c74091d91..b5d0f4e973f7 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -420,7 +420,7 @@ static void InitBoard(struct net_device *dev)
420 /* start putting the multicast addresses into the CAM list. Stop if 420 /* start putting the multicast addresses into the CAM list. Stop if
421 it is full. */ 421 it is full. */
422 422
423 for (mcptr = dev->mc_list; mcptr != NULL; mcptr = mcptr->next) { 423 netdev_for_each_mc_addr(mcptr, dev) {
424 putcam(cams, &camcnt, mcptr->dmi_addr); 424 putcam(cams, &camcnt, mcptr->dmi_addr);
425 if (camcnt == 16) 425 if (camcnt == 16)
426 break; 426 break;
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index a86693906ac8..f2b937966950 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1062,7 +1062,8 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
1062 struct ibmveth_adapter *adapter = netdev_priv(netdev); 1062 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1063 unsigned long lpar_rc; 1063 unsigned long lpar_rc;
1064 1064
1065 if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) { 1065 if ((netdev->flags & IFF_PROMISC) ||
1066 (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
1066 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 1067 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1067 IbmVethMcastEnableRecv | 1068 IbmVethMcastEnableRecv |
1068 IbmVethMcastDisableFiltering, 1069 IbmVethMcastDisableFiltering,
@@ -1071,8 +1072,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
1071 ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc); 1072 ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
1072 } 1073 }
1073 } else { 1074 } else {
1074 struct dev_mc_list *mclist = netdev->mc_list; 1075 struct dev_mc_list *mclist;
1075 int i;
1076 /* clear the filter table & disable filtering */ 1076 /* clear the filter table & disable filtering */
1077 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 1077 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1078 IbmVethMcastEnableRecv | 1078 IbmVethMcastEnableRecv |
@@ -1083,7 +1083,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
1083 ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc); 1083 ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
1084 } 1084 }
1085 /* add the addresses to the filter table */ 1085 /* add the addresses to the filter table */
1086 for(i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) { 1086 netdev_for_each_mc_addr(mclist, netdev) {
1087 // add the multicast address to the filter table 1087 // add the multicast address to the filter table
1088 unsigned long mcast_addr = 0; 1088 unsigned long mcast_addr = 0;
1089 memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6); 1089 memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index c505b50d1fa3..9d7fa2fb85ea 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -727,6 +727,34 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
727} 727}
728 728
729/** 729/**
730 * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
731 * @hw: pointer to the HW structure
732 **/
733void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
734{
735 u32 reg;
736
737
738 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
739 !igb_sgmii_active_82575(hw))
740 return;
741
742 /* Enable PCS to turn on link */
743 reg = rd32(E1000_PCS_CFG0);
744 reg |= E1000_PCS_CFG_PCS_EN;
745 wr32(E1000_PCS_CFG0, reg);
746
747 /* Power up the laser */
748 reg = rd32(E1000_CTRL_EXT);
749 reg &= ~E1000_CTRL_EXT_SDP3_DATA;
750 wr32(E1000_CTRL_EXT, reg);
751
752 /* flush the write to verify completion */
753 wrfl();
754 msleep(1);
755}
756
757/**
730 * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex 758 * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
731 * @hw: pointer to the HW structure 759 * @hw: pointer to the HW structure
732 * @speed: stores the current speed 760 * @speed: stores the current speed
@@ -791,27 +819,12 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
791void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) 819void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
792{ 820{
793 u32 reg; 821 u32 reg;
794 u16 eeprom_data = 0;
795 822
796 if (hw->phy.media_type != e1000_media_type_internal_serdes || 823 if (hw->phy.media_type != e1000_media_type_internal_serdes &&
797 igb_sgmii_active_82575(hw)) 824 igb_sgmii_active_82575(hw))
798 return; 825 return;
799 826
800 if (hw->bus.func == E1000_FUNC_0) 827 if (!igb_enable_mng_pass_thru(hw)) {
801 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
802 else if (hw->mac.type == e1000_82580)
803 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
804 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
805 &eeprom_data);
806 else if (hw->bus.func == E1000_FUNC_1)
807 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
808
809 /*
810 * If APM is not enabled in the EEPROM and management interface is
811 * not enabled, then power down.
812 */
813 if (!(eeprom_data & E1000_NVM_APME_82575) &&
814 !igb_enable_mng_pass_thru(hw)) {
815 /* Disable PCS to turn off link */ 828 /* Disable PCS to turn off link */
816 reg = rd32(E1000_PCS_CFG0); 829 reg = rd32(E1000_PCS_CFG0);
817 reg &= ~E1000_PCS_CFG_PCS_EN; 830 reg &= ~E1000_PCS_CFG_PCS_EN;
@@ -826,8 +839,6 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
826 wrfl(); 839 wrfl();
827 msleep(1); 840 msleep(1);
828 } 841 }
829
830 return;
831} 842}
832 843
833/** 844/**
@@ -1183,6 +1194,22 @@ out:
1183} 1194}
1184 1195
1185/** 1196/**
1197 * igb_power_down_phy_copper_82575 - Remove link during PHY power down
1198 * @hw: pointer to the HW structure
1199 *
1200 * In the case of a PHY power down to save power, or to turn off link during a
1201 * driver unload, or wake on lan is not enabled, remove the link.
1202 **/
1203void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
1204{
1205 /* If the management interface is not enabled, then power down */
1206 if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
1207 igb_power_down_phy_copper(hw);
1208
1209 return;
1210}
1211
1212/**
1186 * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters 1213 * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
1187 * @hw: pointer to the HW structure 1214 * @hw: pointer to the HW structure
1188 * 1215 *
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index d51c9927c819..fbe1c99c193c 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -29,6 +29,8 @@
29#define _E1000_82575_H_ 29#define _E1000_82575_H_
30 30
31extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw); 31extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
32extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
33extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
32extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); 34extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
33 35
34#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ 36#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
@@ -219,6 +221,9 @@ struct e1000_adv_tx_context_desc {
219#define E1000_VLVF_LVLAN 0x00100000 221#define E1000_VLVF_LVLAN 0x00100000
220#define E1000_VLVF_VLANID_ENABLE 0x80000000 222#define E1000_VLVF_VLANID_ENABLE 0x80000000
221 223
224#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
225#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
226
222#define E1000_IOVCTL 0x05BBC 227#define E1000_IOVCTL 0x05BBC
223#define E1000_IOVCTL_REUSE_VFQ 0x00000001 228#define E1000_IOVCTL_REUSE_VFQ 0x00000001
224 229
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 6e036ae3138f..fe6cf1b696c7 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -313,12 +313,6 @@
313#define E1000_PBA_34K 0x0022 313#define E1000_PBA_34K 0x0022
314#define E1000_PBA_64K 0x0040 /* 64KB */ 314#define E1000_PBA_64K 0x0040 /* 64KB */
315 315
316#define IFS_MAX 80
317#define IFS_MIN 40
318#define IFS_RATIO 4
319#define IFS_STEP 10
320#define MIN_NUM_XMITS 1000
321
322/* SW Semaphore Register */ 316/* SW Semaphore Register */
323#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ 317#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
324#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ 318#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
@@ -481,6 +475,7 @@
481/* PHY Control Register */ 475/* PHY Control Register */
482#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ 476#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
483#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ 477#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
478#define MII_CR_POWER_DOWN 0x0800 /* Power down */
484#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ 479#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
485#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ 480#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
486#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ 481#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index dbaeb5f5e0c7..448005276b26 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -339,19 +339,12 @@ struct e1000_mac_info {
339 339
340 enum e1000_mac_type type; 340 enum e1000_mac_type type;
341 341
342 u32 collision_delta;
343 u32 ledctl_default; 342 u32 ledctl_default;
344 u32 ledctl_mode1; 343 u32 ledctl_mode1;
345 u32 ledctl_mode2; 344 u32 ledctl_mode2;
346 u32 mc_filter_type; 345 u32 mc_filter_type;
347 u32 tx_packet_delta;
348 u32 txcw; 346 u32 txcw;
349 347
350 u16 current_ifs_val;
351 u16 ifs_max_val;
352 u16 ifs_min_val;
353 u16 ifs_ratio;
354 u16 ifs_step_size;
355 u16 mta_reg_count; 348 u16 mta_reg_count;
356 u16 uta_reg_count; 349 u16 uta_reg_count;
357 350
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 2ad358a240bf..2a8a886b37eb 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -1304,76 +1304,6 @@ out:
1304} 1304}
1305 1305
1306/** 1306/**
1307 * igb_reset_adaptive - Reset Adaptive Interframe Spacing
1308 * @hw: pointer to the HW structure
1309 *
1310 * Reset the Adaptive Interframe Spacing throttle to default values.
1311 **/
1312void igb_reset_adaptive(struct e1000_hw *hw)
1313{
1314 struct e1000_mac_info *mac = &hw->mac;
1315
1316 if (!mac->adaptive_ifs) {
1317 hw_dbg("Not in Adaptive IFS mode!\n");
1318 goto out;
1319 }
1320
1321 if (!mac->ifs_params_forced) {
1322 mac->current_ifs_val = 0;
1323 mac->ifs_min_val = IFS_MIN;
1324 mac->ifs_max_val = IFS_MAX;
1325 mac->ifs_step_size = IFS_STEP;
1326 mac->ifs_ratio = IFS_RATIO;
1327 }
1328
1329 mac->in_ifs_mode = false;
1330 wr32(E1000_AIT, 0);
1331out:
1332 return;
1333}
1334
1335/**
1336 * igb_update_adaptive - Update Adaptive Interframe Spacing
1337 * @hw: pointer to the HW structure
1338 *
1339 * Update the Adaptive Interframe Spacing Throttle value based on the
1340 * time between transmitted packets and time between collisions.
1341 **/
1342void igb_update_adaptive(struct e1000_hw *hw)
1343{
1344 struct e1000_mac_info *mac = &hw->mac;
1345
1346 if (!mac->adaptive_ifs) {
1347 hw_dbg("Not in Adaptive IFS mode!\n");
1348 goto out;
1349 }
1350
1351 if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
1352 if (mac->tx_packet_delta > MIN_NUM_XMITS) {
1353 mac->in_ifs_mode = true;
1354 if (mac->current_ifs_val < mac->ifs_max_val) {
1355 if (!mac->current_ifs_val)
1356 mac->current_ifs_val = mac->ifs_min_val;
1357 else
1358 mac->current_ifs_val +=
1359 mac->ifs_step_size;
1360 wr32(E1000_AIT,
1361 mac->current_ifs_val);
1362 }
1363 }
1364 } else {
1365 if (mac->in_ifs_mode &&
1366 (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
1367 mac->current_ifs_val = 0;
1368 mac->in_ifs_mode = false;
1369 wr32(E1000_AIT, 0);
1370 }
1371 }
1372out:
1373 return;
1374}
1375
1376/**
1377 * igb_validate_mdi_setting - Verify MDI/MDIx settings 1307 * igb_validate_mdi_setting - Verify MDI/MDIx settings
1378 * @hw: pointer to the HW structure 1308 * @hw: pointer to the HW structure
1379 * 1309 *
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h
index bca17d882417..601be99711c2 100644
--- a/drivers/net/igb/e1000_mac.h
+++ b/drivers/net/igb/e1000_mac.h
@@ -67,8 +67,6 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value);
67void igb_put_hw_semaphore(struct e1000_hw *hw); 67void igb_put_hw_semaphore(struct e1000_hw *hw);
68void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); 68void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
69s32 igb_check_alt_mac_addr(struct e1000_hw *hw); 69s32 igb_check_alt_mac_addr(struct e1000_hw *hw);
70void igb_reset_adaptive(struct e1000_hw *hw);
71void igb_update_adaptive(struct e1000_hw *hw);
72 70
73bool igb_enable_mng_pass_thru(struct e1000_hw *hw); 71bool igb_enable_mng_pass_thru(struct e1000_hw *hw);
74 72
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index 3670a66401b8..cf1f32300923 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -1931,6 +1931,41 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
1931} 1931}
1932 1932
1933/** 1933/**
1934 * igb_power_up_phy_copper - Restore copper link in case of PHY power down
1935 * @hw: pointer to the HW structure
1936 *
1937 * In the case of a PHY power down to save power, or to turn off link during a
1938 * driver unload, restore the link to previous settings.
1939 **/
1940void igb_power_up_phy_copper(struct e1000_hw *hw)
1941{
1942 u16 mii_reg = 0;
1943
1944 /* The PHY will retain its settings across a power down/up cycle */
1945 hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
1946 mii_reg &= ~MII_CR_POWER_DOWN;
1947 hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
1948}
1949
1950/**
1951 * igb_power_down_phy_copper - Power down copper PHY
1952 * @hw: pointer to the HW structure
1953 *
1954 * Power down PHY to save power when interface is down and wake on lan
1955 * is not enabled.
1956 **/
1957void igb_power_down_phy_copper(struct e1000_hw *hw)
1958{
1959 u16 mii_reg = 0;
1960
1961 /* The PHY will retain its settings across a power down/up cycle */
1962 hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
1963 mii_reg |= MII_CR_POWER_DOWN;
1964 hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
1965 msleep(1);
1966}
1967
1968/**
1934 * igb_check_polarity_82580 - Checks the polarity. 1969 * igb_check_polarity_82580 - Checks the polarity.
1935 * @hw: pointer to the HW structure 1970 * @hw: pointer to the HW structure
1936 * 1971 *
diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h
index 555eb54bb6ed..565a6dbb3714 100644
--- a/drivers/net/igb/e1000_phy.h
+++ b/drivers/net/igb/e1000_phy.h
@@ -60,6 +60,8 @@ s32 igb_setup_copper_link(struct e1000_hw *hw);
60s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); 60s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
61s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, 61s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
62 u32 usec_interval, bool *success); 62 u32 usec_interval, bool *success);
63void igb_power_up_phy_copper(struct e1000_hw *hw);
64void igb_power_down_phy_copper(struct e1000_hw *hw);
63s32 igb_phy_init_script_igp3(struct e1000_hw *hw); 65s32 igb_phy_init_script_igp3(struct e1000_hw *hw);
64s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); 66s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
65s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); 67s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index dd4e6ffd29f5..abb7333a1fbf 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -310,6 +310,7 @@
310#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) 310#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
311#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine 311#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
312 * Filter - RW */ 312 * Filter - RW */
313#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
313 314
314#define wr32(reg, value) (writel(value, hw->hw_addr + reg)) 315#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
315#define rd32(reg) (readl(hw->hw_addr + reg)) 316#define rd32(reg) (readl(hw->hw_addr + reg))
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index b1c1eb88893f..a1775705b24c 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -75,11 +75,14 @@ struct vf_data_storage {
75 u16 vlans_enabled; 75 u16 vlans_enabled;
76 u32 flags; 76 u32 flags;
77 unsigned long last_nack; 77 unsigned long last_nack;
78 u16 pf_vlan; /* When set, guest VLAN config not allowed. */
79 u16 pf_qos;
78}; 80};
79 81
80#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ 82#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
81#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */ 83#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
82#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */ 84#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
85#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */
83 86
84/* RX descriptor control thresholds. 87/* RX descriptor control thresholds.
85 * PTHRESH - MAC will consider prefetch if it has fewer than this number of 88 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
@@ -92,13 +95,13 @@ struct vf_data_storage {
92 * descriptors until either it has this many to write back, or the 95 * descriptors until either it has this many to write back, or the
93 * ITR timer expires. 96 * ITR timer expires.
94 */ 97 */
95#define IGB_RX_PTHRESH (hw->mac.type <= e1000_82576 ? 16 : 8) 98#define IGB_RX_PTHRESH 8
96#define IGB_RX_HTHRESH 8 99#define IGB_RX_HTHRESH 8
97#define IGB_RX_WTHRESH 1 100#define IGB_RX_WTHRESH 1
98#define IGB_TX_PTHRESH 8 101#define IGB_TX_PTHRESH 8
99#define IGB_TX_HTHRESH 1 102#define IGB_TX_HTHRESH 1
100#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \ 103#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
101 adapter->msix_entries) ? 0 : 16) 104 adapter->msix_entries) ? 1 : 16)
102 105
103/* this is the size past which hardware will drop packets when setting LPE=0 */ 106/* this is the size past which hardware will drop packets when setting LPE=0 */
104#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 107#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
@@ -138,6 +141,7 @@ struct igb_buffer {
138 u16 length; 141 u16 length;
139 u16 next_to_watch; 142 u16 next_to_watch;
140 u16 mapped_as_page; 143 u16 mapped_as_page;
144 u16 gso_segs;
141 }; 145 };
142 /* RX */ 146 /* RX */
143 struct { 147 struct {
@@ -173,7 +177,6 @@ struct igb_q_vector {
173 177
174 u16 itr_val; 178 u16 itr_val;
175 u8 set_itr; 179 u8 set_itr;
176 u8 itr_shift;
177 void __iomem *itr_register; 180 void __iomem *itr_register;
178 181
179 char name[IFNAMSIZ + 9]; 182 char name[IFNAMSIZ + 9];
@@ -238,7 +241,6 @@ static inline int igb_desc_unused(struct igb_ring *ring)
238} 241}
239 242
240/* board specific private data structure */ 243/* board specific private data structure */
241
242struct igb_adapter { 244struct igb_adapter {
243 struct timer_list watchdog_timer; 245 struct timer_list watchdog_timer;
244 struct timer_list phy_info_timer; 246 struct timer_list phy_info_timer;
@@ -264,12 +266,12 @@ struct igb_adapter {
264 unsigned long led_status; 266 unsigned long led_status;
265 267
266 /* TX */ 268 /* TX */
267 struct igb_ring *tx_ring; /* One per active queue */ 269 struct igb_ring *tx_ring[16];
268 unsigned long tx_queue_len; 270 unsigned long tx_queue_len;
269 u32 tx_timeout_count; 271 u32 tx_timeout_count;
270 272
271 /* RX */ 273 /* RX */
272 struct igb_ring *rx_ring; /* One per active queue */ 274 struct igb_ring *rx_ring[16];
273 int num_tx_queues; 275 int num_tx_queues;
274 int num_rx_queues; 276 int num_rx_queues;
275 277
@@ -354,7 +356,9 @@ extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
354 struct igb_buffer *); 356 struct igb_buffer *);
355extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int); 357extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
356extern void igb_update_stats(struct igb_adapter *); 358extern void igb_update_stats(struct igb_adapter *);
359extern bool igb_has_link(struct igb_adapter *adapter);
357extern void igb_set_ethtool_ops(struct net_device *); 360extern void igb_set_ethtool_ops(struct net_device *);
361extern void igb_power_up_link(struct igb_adapter *);
358 362
359static inline s32 igb_reset_phy(struct e1000_hw *hw) 363static inline s32 igb_reset_phy(struct e1000_hw *hw)
360{ 364{
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index f771a6c08777..a4cead12fd98 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -234,6 +234,24 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
234 return 0; 234 return 0;
235} 235}
236 236
237static u32 igb_get_link(struct net_device *netdev)
238{
239 struct igb_adapter *adapter = netdev_priv(netdev);
240 struct e1000_mac_info *mac = &adapter->hw.mac;
241
242 /*
243 * If the link is not reported up to netdev, interrupts are disabled,
244 * and so the physical link state may have changed since we last
245 * looked. Set get_link_status to make sure that the true link
246 * state is interrogated, rather than pulling a cached and possibly
247 * stale link state from the driver.
248 */
249 if (!netif_carrier_ok(netdev))
250 mac->get_link_status = 1;
251
252 return igb_has_link(adapter);
253}
254
237static void igb_get_pauseparam(struct net_device *netdev, 255static void igb_get_pauseparam(struct net_device *netdev,
238 struct ethtool_pauseparam *pause) 256 struct ethtool_pauseparam *pause)
239{ 257{
@@ -296,7 +314,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
296static u32 igb_get_rx_csum(struct net_device *netdev) 314static u32 igb_get_rx_csum(struct net_device *netdev)
297{ 315{
298 struct igb_adapter *adapter = netdev_priv(netdev); 316 struct igb_adapter *adapter = netdev_priv(netdev);
299 return !!(adapter->rx_ring[0].flags & IGB_RING_FLAG_RX_CSUM); 317 return !!(adapter->rx_ring[0]->flags & IGB_RING_FLAG_RX_CSUM);
300} 318}
301 319
302static int igb_set_rx_csum(struct net_device *netdev, u32 data) 320static int igb_set_rx_csum(struct net_device *netdev, u32 data)
@@ -306,9 +324,9 @@ static int igb_set_rx_csum(struct net_device *netdev, u32 data)
306 324
307 for (i = 0; i < adapter->num_rx_queues; i++) { 325 for (i = 0; i < adapter->num_rx_queues; i++) {
308 if (data) 326 if (data)
309 adapter->rx_ring[i].flags |= IGB_RING_FLAG_RX_CSUM; 327 adapter->rx_ring[i]->flags |= IGB_RING_FLAG_RX_CSUM;
310 else 328 else
311 adapter->rx_ring[i].flags &= ~IGB_RING_FLAG_RX_CSUM; 329 adapter->rx_ring[i]->flags &= ~IGB_RING_FLAG_RX_CSUM;
312 } 330 }
313 331
314 return 0; 332 return 0;
@@ -771,9 +789,9 @@ static int igb_set_ringparam(struct net_device *netdev,
771 789
772 if (!netif_running(adapter->netdev)) { 790 if (!netif_running(adapter->netdev)) {
773 for (i = 0; i < adapter->num_tx_queues; i++) 791 for (i = 0; i < adapter->num_tx_queues; i++)
774 adapter->tx_ring[i].count = new_tx_count; 792 adapter->tx_ring[i]->count = new_tx_count;
775 for (i = 0; i < adapter->num_rx_queues; i++) 793 for (i = 0; i < adapter->num_rx_queues; i++)
776 adapter->rx_ring[i].count = new_rx_count; 794 adapter->rx_ring[i]->count = new_rx_count;
777 adapter->tx_ring_count = new_tx_count; 795 adapter->tx_ring_count = new_tx_count;
778 adapter->rx_ring_count = new_rx_count; 796 adapter->rx_ring_count = new_rx_count;
779 goto clear_reset; 797 goto clear_reset;
@@ -797,10 +815,10 @@ static int igb_set_ringparam(struct net_device *netdev,
797 * to the tx and rx ring structs. 815 * to the tx and rx ring structs.
798 */ 816 */
799 if (new_tx_count != adapter->tx_ring_count) { 817 if (new_tx_count != adapter->tx_ring_count) {
800 memcpy(temp_ring, adapter->tx_ring,
801 adapter->num_tx_queues * sizeof(struct igb_ring));
802
803 for (i = 0; i < adapter->num_tx_queues; i++) { 818 for (i = 0; i < adapter->num_tx_queues; i++) {
819 memcpy(&temp_ring[i], adapter->tx_ring[i],
820 sizeof(struct igb_ring));
821
804 temp_ring[i].count = new_tx_count; 822 temp_ring[i].count = new_tx_count;
805 err = igb_setup_tx_resources(&temp_ring[i]); 823 err = igb_setup_tx_resources(&temp_ring[i]);
806 if (err) { 824 if (err) {
@@ -812,20 +830,21 @@ static int igb_set_ringparam(struct net_device *netdev,
812 } 830 }
813 } 831 }
814 832
815 for (i = 0; i < adapter->num_tx_queues; i++) 833 for (i = 0; i < adapter->num_tx_queues; i++) {
816 igb_free_tx_resources(&adapter->tx_ring[i]); 834 igb_free_tx_resources(adapter->tx_ring[i]);
817 835
818 memcpy(adapter->tx_ring, temp_ring, 836 memcpy(adapter->tx_ring[i], &temp_ring[i],
819 adapter->num_tx_queues * sizeof(struct igb_ring)); 837 sizeof(struct igb_ring));
838 }
820 839
821 adapter->tx_ring_count = new_tx_count; 840 adapter->tx_ring_count = new_tx_count;
822 } 841 }
823 842
824 if (new_rx_count != adapter->rx_ring->count) { 843 if (new_rx_count != adapter->rx_ring_count) {
825 memcpy(temp_ring, adapter->rx_ring,
826 adapter->num_rx_queues * sizeof(struct igb_ring));
827
828 for (i = 0; i < adapter->num_rx_queues; i++) { 844 for (i = 0; i < adapter->num_rx_queues; i++) {
845 memcpy(&temp_ring[i], adapter->rx_ring[i],
846 sizeof(struct igb_ring));
847
829 temp_ring[i].count = new_rx_count; 848 temp_ring[i].count = new_rx_count;
830 err = igb_setup_rx_resources(&temp_ring[i]); 849 err = igb_setup_rx_resources(&temp_ring[i]);
831 if (err) { 850 if (err) {
@@ -838,11 +857,12 @@ static int igb_set_ringparam(struct net_device *netdev,
838 857
839 } 858 }
840 859
841 for (i = 0; i < adapter->num_rx_queues; i++) 860 for (i = 0; i < adapter->num_rx_queues; i++) {
842 igb_free_rx_resources(&adapter->rx_ring[i]); 861 igb_free_rx_resources(adapter->rx_ring[i]);
843 862
844 memcpy(adapter->rx_ring, temp_ring, 863 memcpy(adapter->rx_ring[i], &temp_ring[i],
845 adapter->num_rx_queues * sizeof(struct igb_ring)); 864 sizeof(struct igb_ring));
865 }
846 866
847 adapter->rx_ring_count = new_rx_count; 867 adapter->rx_ring_count = new_rx_count;
848 } 868 }
@@ -1704,6 +1724,9 @@ static void igb_diag_test(struct net_device *netdev,
1704 1724
1705 dev_info(&adapter->pdev->dev, "offline testing starting\n"); 1725 dev_info(&adapter->pdev->dev, "offline testing starting\n");
1706 1726
1727 /* power up link for link test */
1728 igb_power_up_link(adapter);
1729
1707 /* Link test performed before hardware reset so autoneg doesn't 1730 /* Link test performed before hardware reset so autoneg doesn't
1708 * interfere with test result */ 1731 * interfere with test result */
1709 if (igb_link_test(adapter, &data[4])) 1732 if (igb_link_test(adapter, &data[4]))
@@ -1727,6 +1750,8 @@ static void igb_diag_test(struct net_device *netdev,
1727 eth_test->flags |= ETH_TEST_FL_FAILED; 1750 eth_test->flags |= ETH_TEST_FL_FAILED;
1728 1751
1729 igb_reset(adapter); 1752 igb_reset(adapter);
1753 /* power up link for loopback test */
1754 igb_power_up_link(adapter);
1730 if (igb_loopback_test(adapter, &data[3])) 1755 if (igb_loopback_test(adapter, &data[3]))
1731 eth_test->flags |= ETH_TEST_FL_FAILED; 1756 eth_test->flags |= ETH_TEST_FL_FAILED;
1732 1757
@@ -1745,9 +1770,14 @@ static void igb_diag_test(struct net_device *netdev,
1745 dev_open(netdev); 1770 dev_open(netdev);
1746 } else { 1771 } else {
1747 dev_info(&adapter->pdev->dev, "online testing starting\n"); 1772 dev_info(&adapter->pdev->dev, "online testing starting\n");
1748 /* Online tests */ 1773
1749 if (igb_link_test(adapter, &data[4])) 1774 /* PHY is powered down when interface is down */
1750 eth_test->flags |= ETH_TEST_FL_FAILED; 1775 if (!netif_carrier_ok(netdev)) {
1776 data[4] = 0;
1777 } else {
1778 if (igb_link_test(adapter, &data[4]))
1779 eth_test->flags |= ETH_TEST_FL_FAILED;
1780 }
1751 1781
1752 /* Online tests aren't run; pass by default */ 1782 /* Online tests aren't run; pass by default */
1753 data[0] = 0; 1783 data[0] = 0;
@@ -1812,7 +1842,8 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1812 struct igb_adapter *adapter = netdev_priv(netdev); 1842 struct igb_adapter *adapter = netdev_priv(netdev);
1813 1843
1814 wol->supported = WAKE_UCAST | WAKE_MCAST | 1844 wol->supported = WAKE_UCAST | WAKE_MCAST |
1815 WAKE_BCAST | WAKE_MAGIC; 1845 WAKE_BCAST | WAKE_MAGIC |
1846 WAKE_PHY;
1816 wol->wolopts = 0; 1847 wol->wolopts = 0;
1817 1848
1818 /* this function will set ->supported = 0 and return 1 if wol is not 1849 /* this function will set ->supported = 0 and return 1 if wol is not
@@ -1835,15 +1866,15 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1835 wol->wolopts |= WAKE_BCAST; 1866 wol->wolopts |= WAKE_BCAST;
1836 if (adapter->wol & E1000_WUFC_MAG) 1867 if (adapter->wol & E1000_WUFC_MAG)
1837 wol->wolopts |= WAKE_MAGIC; 1868 wol->wolopts |= WAKE_MAGIC;
1838 1869 if (adapter->wol & E1000_WUFC_LNKC)
1839 return; 1870 wol->wolopts |= WAKE_PHY;
1840} 1871}
1841 1872
1842static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 1873static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1843{ 1874{
1844 struct igb_adapter *adapter = netdev_priv(netdev); 1875 struct igb_adapter *adapter = netdev_priv(netdev);
1845 1876
1846 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 1877 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
1847 return -EOPNOTSUPP; 1878 return -EOPNOTSUPP;
1848 1879
1849 if (igb_wol_exclusion(adapter, wol) || 1880 if (igb_wol_exclusion(adapter, wol) ||
@@ -1861,6 +1892,8 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1861 adapter->wol |= E1000_WUFC_BC; 1892 adapter->wol |= E1000_WUFC_BC;
1862 if (wol->wolopts & WAKE_MAGIC) 1893 if (wol->wolopts & WAKE_MAGIC)
1863 adapter->wol |= E1000_WUFC_MAG; 1894 adapter->wol |= E1000_WUFC_MAG;
1895 if (wol->wolopts & WAKE_PHY)
1896 adapter->wol |= E1000_WUFC_LNKC;
1864 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 1897 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1865 1898
1866 return 0; 1899 return 0;
@@ -2005,12 +2038,12 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
2005 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 2038 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
2006 } 2039 }
2007 for (j = 0; j < adapter->num_tx_queues; j++) { 2040 for (j = 0; j < adapter->num_tx_queues; j++) {
2008 queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats; 2041 queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats;
2009 for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++) 2042 for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
2010 data[i] = queue_stat[k]; 2043 data[i] = queue_stat[k];
2011 } 2044 }
2012 for (j = 0; j < adapter->num_rx_queues; j++) { 2045 for (j = 0; j < adapter->num_rx_queues; j++) {
2013 queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats; 2046 queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats;
2014 for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++) 2047 for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
2015 data[i] = queue_stat[k]; 2048 data[i] = queue_stat[k];
2016 } 2049 }
@@ -2074,7 +2107,7 @@ static const struct ethtool_ops igb_ethtool_ops = {
2074 .get_msglevel = igb_get_msglevel, 2107 .get_msglevel = igb_get_msglevel,
2075 .set_msglevel = igb_set_msglevel, 2108 .set_msglevel = igb_set_msglevel,
2076 .nway_reset = igb_nway_reset, 2109 .nway_reset = igb_nway_reset,
2077 .get_link = ethtool_op_get_link, 2110 .get_link = igb_get_link,
2078 .get_eeprom_len = igb_get_eeprom_len, 2111 .get_eeprom_len = igb_get_eeprom_len,
2079 .get_eeprom = igb_get_eeprom, 2112 .get_eeprom = igb_get_eeprom,
2080 .set_eeprom = igb_set_eeprom, 2113 .set_eeprom = igb_set_eeprom,
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index c881347cb26d..583a21c1def3 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -60,7 +60,7 @@ static const struct e1000_info *igb_info_tbl[] = {
60 [board_82575] = &e1000_82575_info, 60 [board_82575] = &e1000_82575_info,
61}; 61};
62 62
63static struct pci_device_id igb_pci_tbl[] = { 63static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
64 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, 64 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, 65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, 66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
@@ -133,6 +133,12 @@ static void igb_msg_task(struct igb_adapter *);
133static void igb_vmm_control(struct igb_adapter *); 133static void igb_vmm_control(struct igb_adapter *);
134static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *); 134static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
135static void igb_restore_vf_multicasts(struct igb_adapter *adapter); 135static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
136static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
137static int igb_ndo_set_vf_vlan(struct net_device *netdev,
138 int vf, u16 vlan, u8 qos);
139static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
140static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
141 struct ifla_vf_info *ivi);
136 142
137#ifdef CONFIG_PM 143#ifdef CONFIG_PM
138static int igb_suspend(struct pci_dev *, pm_message_t); 144static int igb_suspend(struct pci_dev *, pm_message_t);
@@ -312,31 +318,35 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
312 */ 318 */
313 if (adapter->vfs_allocated_count) { 319 if (adapter->vfs_allocated_count) {
314 for (; i < adapter->rss_queues; i++) 320 for (; i < adapter->rss_queues; i++)
315 adapter->rx_ring[i].reg_idx = rbase_offset + 321 adapter->rx_ring[i]->reg_idx = rbase_offset +
316 Q_IDX_82576(i); 322 Q_IDX_82576(i);
317 for (; j < adapter->rss_queues; j++) 323 for (; j < adapter->rss_queues; j++)
318 adapter->tx_ring[j].reg_idx = rbase_offset + 324 adapter->tx_ring[j]->reg_idx = rbase_offset +
319 Q_IDX_82576(j); 325 Q_IDX_82576(j);
320 } 326 }
321 case e1000_82575: 327 case e1000_82575:
322 case e1000_82580: 328 case e1000_82580:
323 default: 329 default:
324 for (; i < adapter->num_rx_queues; i++) 330 for (; i < adapter->num_rx_queues; i++)
325 adapter->rx_ring[i].reg_idx = rbase_offset + i; 331 adapter->rx_ring[i]->reg_idx = rbase_offset + i;
326 for (; j < adapter->num_tx_queues; j++) 332 for (; j < adapter->num_tx_queues; j++)
327 adapter->tx_ring[j].reg_idx = rbase_offset + j; 333 adapter->tx_ring[j]->reg_idx = rbase_offset + j;
328 break; 334 break;
329 } 335 }
330} 336}
331 337
332static void igb_free_queues(struct igb_adapter *adapter) 338static void igb_free_queues(struct igb_adapter *adapter)
333{ 339{
334 kfree(adapter->tx_ring); 340 int i;
335 kfree(adapter->rx_ring);
336
337 adapter->tx_ring = NULL;
338 adapter->rx_ring = NULL;
339 341
342 for (i = 0; i < adapter->num_tx_queues; i++) {
343 kfree(adapter->tx_ring[i]);
344 adapter->tx_ring[i] = NULL;
345 }
346 for (i = 0; i < adapter->num_rx_queues; i++) {
347 kfree(adapter->rx_ring[i]);
348 adapter->rx_ring[i] = NULL;
349 }
340 adapter->num_rx_queues = 0; 350 adapter->num_rx_queues = 0;
341 adapter->num_tx_queues = 0; 351 adapter->num_tx_queues = 0;
342} 352}
@@ -350,20 +360,13 @@ static void igb_free_queues(struct igb_adapter *adapter)
350 **/ 360 **/
351static int igb_alloc_queues(struct igb_adapter *adapter) 361static int igb_alloc_queues(struct igb_adapter *adapter)
352{ 362{
363 struct igb_ring *ring;
353 int i; 364 int i;
354 365
355 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
356 sizeof(struct igb_ring), GFP_KERNEL);
357 if (!adapter->tx_ring)
358 goto err;
359
360 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
361 sizeof(struct igb_ring), GFP_KERNEL);
362 if (!adapter->rx_ring)
363 goto err;
364
365 for (i = 0; i < adapter->num_tx_queues; i++) { 366 for (i = 0; i < adapter->num_tx_queues; i++) {
366 struct igb_ring *ring = &(adapter->tx_ring[i]); 367 ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
368 if (!ring)
369 goto err;
367 ring->count = adapter->tx_ring_count; 370 ring->count = adapter->tx_ring_count;
368 ring->queue_index = i; 371 ring->queue_index = i;
369 ring->pdev = adapter->pdev; 372 ring->pdev = adapter->pdev;
@@ -371,10 +374,13 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
371 /* For 82575, context index must be unique per ring. */ 374 /* For 82575, context index must be unique per ring. */
372 if (adapter->hw.mac.type == e1000_82575) 375 if (adapter->hw.mac.type == e1000_82575)
373 ring->flags = IGB_RING_FLAG_TX_CTX_IDX; 376 ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
377 adapter->tx_ring[i] = ring;
374 } 378 }
375 379
376 for (i = 0; i < adapter->num_rx_queues; i++) { 380 for (i = 0; i < adapter->num_rx_queues; i++) {
377 struct igb_ring *ring = &(adapter->rx_ring[i]); 381 ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
382 if (!ring)
383 goto err;
378 ring->count = adapter->rx_ring_count; 384 ring->count = adapter->rx_ring_count;
379 ring->queue_index = i; 385 ring->queue_index = i;
380 ring->pdev = adapter->pdev; 386 ring->pdev = adapter->pdev;
@@ -384,6 +390,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
384 /* set flag indicating ring supports SCTP checksum offload */ 390 /* set flag indicating ring supports SCTP checksum offload */
385 if (adapter->hw.mac.type >= e1000_82576) 391 if (adapter->hw.mac.type >= e1000_82576)
386 ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM; 392 ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
393 adapter->rx_ring[i] = ring;
387 } 394 }
388 395
389 igb_cache_ring_register(adapter); 396 igb_cache_ring_register(adapter);
@@ -498,6 +505,12 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
498 BUG(); 505 BUG();
499 break; 506 break;
500 } 507 }
508
509 /* add q_vector eims value to global eims_enable_mask */
510 adapter->eims_enable_mask |= q_vector->eims_value;
511
512 /* configure q_vector to set itr on first interrupt */
513 q_vector->set_itr = 1;
501} 514}
502 515
503/** 516/**
@@ -555,11 +568,8 @@ static void igb_configure_msix(struct igb_adapter *adapter)
555 568
556 adapter->eims_enable_mask |= adapter->eims_other; 569 adapter->eims_enable_mask |= adapter->eims_other;
557 570
558 for (i = 0; i < adapter->num_q_vectors; i++) { 571 for (i = 0; i < adapter->num_q_vectors; i++)
559 struct igb_q_vector *q_vector = adapter->q_vector[i]; 572 igb_assign_vector(adapter->q_vector[i], vector++);
560 igb_assign_vector(q_vector, vector++);
561 adapter->eims_enable_mask |= q_vector->eims_value;
562 }
563 573
564 wrfl(); 574 wrfl();
565} 575}
@@ -639,6 +649,8 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
639 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { 649 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
640 struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; 650 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
641 adapter->q_vector[v_idx] = NULL; 651 adapter->q_vector[v_idx] = NULL;
652 if (!q_vector)
653 continue;
642 netif_napi_del(&q_vector->napi); 654 netif_napi_del(&q_vector->napi);
643 kfree(q_vector); 655 kfree(q_vector);
644 } 656 }
@@ -750,33 +762,24 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
750 if (!q_vector) 762 if (!q_vector)
751 goto err_out; 763 goto err_out;
752 q_vector->adapter = adapter; 764 q_vector->adapter = adapter;
753 q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
754 q_vector->itr_register = hw->hw_addr + E1000_EITR(0); 765 q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
755 q_vector->itr_val = IGB_START_ITR; 766 q_vector->itr_val = IGB_START_ITR;
756 q_vector->set_itr = 1;
757 netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64); 767 netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
758 adapter->q_vector[v_idx] = q_vector; 768 adapter->q_vector[v_idx] = q_vector;
759 } 769 }
760 return 0; 770 return 0;
761 771
762err_out: 772err_out:
763 while (v_idx) { 773 igb_free_q_vectors(adapter);
764 v_idx--;
765 q_vector = adapter->q_vector[v_idx];
766 netif_napi_del(&q_vector->napi);
767 kfree(q_vector);
768 adapter->q_vector[v_idx] = NULL;
769 }
770 return -ENOMEM; 774 return -ENOMEM;
771} 775}
772 776
773static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter, 777static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
774 int ring_idx, int v_idx) 778 int ring_idx, int v_idx)
775{ 779{
776 struct igb_q_vector *q_vector; 780 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
777 781
778 q_vector = adapter->q_vector[v_idx]; 782 q_vector->rx_ring = adapter->rx_ring[ring_idx];
779 q_vector->rx_ring = &adapter->rx_ring[ring_idx];
780 q_vector->rx_ring->q_vector = q_vector; 783 q_vector->rx_ring->q_vector = q_vector;
781 q_vector->itr_val = adapter->rx_itr_setting; 784 q_vector->itr_val = adapter->rx_itr_setting;
782 if (q_vector->itr_val && q_vector->itr_val <= 3) 785 if (q_vector->itr_val && q_vector->itr_val <= 3)
@@ -786,10 +789,9 @@ static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
786static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter, 789static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
787 int ring_idx, int v_idx) 790 int ring_idx, int v_idx)
788{ 791{
789 struct igb_q_vector *q_vector; 792 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
790 793
791 q_vector = adapter->q_vector[v_idx]; 794 q_vector->tx_ring = adapter->tx_ring[ring_idx];
792 q_vector->tx_ring = &adapter->tx_ring[ring_idx];
793 q_vector->tx_ring->q_vector = q_vector; 795 q_vector->tx_ring->q_vector = q_vector;
794 q_vector->itr_val = adapter->tx_itr_setting; 796 q_vector->itr_val = adapter->tx_itr_setting;
795 if (q_vector->itr_val && q_vector->itr_val <= 3) 797 if (q_vector->itr_val && q_vector->itr_val <= 3)
@@ -1099,7 +1101,7 @@ static void igb_configure(struct igb_adapter *adapter)
1099 * at least 1 descriptor unused to make sure 1101 * at least 1 descriptor unused to make sure
1100 * next_to_use != next_to_clean */ 1102 * next_to_use != next_to_clean */
1101 for (i = 0; i < adapter->num_rx_queues; i++) { 1103 for (i = 0; i < adapter->num_rx_queues; i++) {
1102 struct igb_ring *ring = &adapter->rx_ring[i]; 1104 struct igb_ring *ring = adapter->rx_ring[i];
1103 igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring)); 1105 igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
1104 } 1106 }
1105 1107
@@ -1107,6 +1109,29 @@ static void igb_configure(struct igb_adapter *adapter)
1107 adapter->tx_queue_len = netdev->tx_queue_len; 1109 adapter->tx_queue_len = netdev->tx_queue_len;
1108} 1110}
1109 1111
1112/**
1113 * igb_power_up_link - Power up the phy/serdes link
1114 * @adapter: address of board private structure
1115 **/
1116void igb_power_up_link(struct igb_adapter *adapter)
1117{
1118 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1119 igb_power_up_phy_copper(&adapter->hw);
1120 else
1121 igb_power_up_serdes_link_82575(&adapter->hw);
1122}
1123
1124/**
1125 * igb_power_down_link - Power down the phy/serdes link
1126 * @adapter: address of board private structure
1127 */
1128static void igb_power_down_link(struct igb_adapter *adapter)
1129{
1130 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1131 igb_power_down_phy_copper_82575(&adapter->hw);
1132 else
1133 igb_shutdown_serdes_link_82575(&adapter->hw);
1134}
1110 1135
1111/** 1136/**
1112 * igb_up - Open the interface and prepare it to handle traffic 1137 * igb_up - Open the interface and prepare it to handle traffic
@@ -1328,12 +1353,14 @@ void igb_reset(struct igb_adapter *adapter)
1328 wr32(E1000_PCIEMISC, 1353 wr32(E1000_PCIEMISC,
1329 reg & ~E1000_PCIEMISC_LX_DECISION); 1354 reg & ~E1000_PCIEMISC_LX_DECISION);
1330 } 1355 }
1356 if (!netif_running(adapter->netdev))
1357 igb_power_down_link(adapter);
1358
1331 igb_update_mng_vlan(adapter); 1359 igb_update_mng_vlan(adapter);
1332 1360
1333 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 1361 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1334 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); 1362 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1335 1363
1336 igb_reset_adaptive(hw);
1337 igb_get_phy_info(hw); 1364 igb_get_phy_info(hw);
1338} 1365}
1339 1366
@@ -1352,6 +1379,10 @@ static const struct net_device_ops igb_netdev_ops = {
1352 .ndo_vlan_rx_register = igb_vlan_rx_register, 1379 .ndo_vlan_rx_register = igb_vlan_rx_register,
1353 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, 1380 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1354 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, 1381 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
1382 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1383 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1384 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1385 .ndo_get_vf_config = igb_ndo_get_vf_config,
1355#ifdef CONFIG_NET_POLL_CONTROLLER 1386#ifdef CONFIG_NET_POLL_CONTROLLER
1356 .ndo_poll_controller = igb_netpoll, 1387 .ndo_poll_controller = igb_netpoll,
1357#endif 1388#endif
@@ -1472,7 +1503,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1472 igb_get_bus_info_pcie(hw); 1503 igb_get_bus_info_pcie(hw);
1473 1504
1474 hw->phy.autoneg_wait_to_complete = false; 1505 hw->phy.autoneg_wait_to_complete = false;
1475 hw->mac.adaptive_ifs = true;
1476 1506
1477 /* Copper options */ 1507 /* Copper options */
1478 if (hw->phy.media_type == e1000_media_type_copper) { 1508 if (hw->phy.media_type == e1000_media_type_copper) {
@@ -1706,9 +1736,6 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1706 1736
1707 unregister_netdev(netdev); 1737 unregister_netdev(netdev);
1708 1738
1709 if (!igb_check_reset_block(hw))
1710 igb_reset_phy(hw);
1711
1712 igb_clear_interrupt_scheme(adapter); 1739 igb_clear_interrupt_scheme(adapter);
1713 1740
1714#ifdef CONFIG_PCI_IOV 1741#ifdef CONFIG_PCI_IOV
@@ -1984,7 +2011,7 @@ static int igb_open(struct net_device *netdev)
1984 if (err) 2011 if (err)
1985 goto err_setup_rx; 2012 goto err_setup_rx;
1986 2013
1987 /* e1000_power_up_phy(adapter); */ 2014 igb_power_up_link(adapter);
1988 2015
1989 /* before we allocate an interrupt, we must be ready to handle it. 2016 /* before we allocate an interrupt, we must be ready to handle it.
1990 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 2017 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
@@ -2026,7 +2053,7 @@ static int igb_open(struct net_device *netdev)
2026 2053
2027err_req_irq: 2054err_req_irq:
2028 igb_release_hw_control(adapter); 2055 igb_release_hw_control(adapter);
2029 /* e1000_power_down_phy(adapter); */ 2056 igb_power_down_link(adapter);
2030 igb_free_all_rx_resources(adapter); 2057 igb_free_all_rx_resources(adapter);
2031err_setup_rx: 2058err_setup_rx:
2032 igb_free_all_tx_resources(adapter); 2059 igb_free_all_tx_resources(adapter);
@@ -2114,19 +2141,19 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2114 int i, err = 0; 2141 int i, err = 0;
2115 2142
2116 for (i = 0; i < adapter->num_tx_queues; i++) { 2143 for (i = 0; i < adapter->num_tx_queues; i++) {
2117 err = igb_setup_tx_resources(&adapter->tx_ring[i]); 2144 err = igb_setup_tx_resources(adapter->tx_ring[i]);
2118 if (err) { 2145 if (err) {
2119 dev_err(&pdev->dev, 2146 dev_err(&pdev->dev,
2120 "Allocation for Tx Queue %u failed\n", i); 2147 "Allocation for Tx Queue %u failed\n", i);
2121 for (i--; i >= 0; i--) 2148 for (i--; i >= 0; i--)
2122 igb_free_tx_resources(&adapter->tx_ring[i]); 2149 igb_free_tx_resources(adapter->tx_ring[i]);
2123 break; 2150 break;
2124 } 2151 }
2125 } 2152 }
2126 2153
2127 for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) { 2154 for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
2128 int r_idx = i % adapter->num_tx_queues; 2155 int r_idx = i % adapter->num_tx_queues;
2129 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx]; 2156 adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
2130 } 2157 }
2131 return err; 2158 return err;
2132} 2159}
@@ -2209,7 +2236,7 @@ static void igb_configure_tx(struct igb_adapter *adapter)
2209 int i; 2236 int i;
2210 2237
2211 for (i = 0; i < adapter->num_tx_queues; i++) 2238 for (i = 0; i < adapter->num_tx_queues; i++)
2212 igb_configure_tx_ring(adapter, &adapter->tx_ring[i]); 2239 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
2213} 2240}
2214 2241
2215/** 2242/**
@@ -2267,12 +2294,12 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2267 int i, err = 0; 2294 int i, err = 0;
2268 2295
2269 for (i = 0; i < adapter->num_rx_queues; i++) { 2296 for (i = 0; i < adapter->num_rx_queues; i++) {
2270 err = igb_setup_rx_resources(&adapter->rx_ring[i]); 2297 err = igb_setup_rx_resources(adapter->rx_ring[i]);
2271 if (err) { 2298 if (err) {
2272 dev_err(&pdev->dev, 2299 dev_err(&pdev->dev,
2273 "Allocation for Rx Queue %u failed\n", i); 2300 "Allocation for Rx Queue %u failed\n", i);
2274 for (i--; i >= 0; i--) 2301 for (i--; i >= 0; i--)
2275 igb_free_rx_resources(&adapter->rx_ring[i]); 2302 igb_free_rx_resources(adapter->rx_ring[i]);
2276 break; 2303 break;
2277 } 2304 }
2278 } 2305 }
@@ -2479,7 +2506,8 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
2479 wr32(E1000_RLPML, max_frame_size); 2506 wr32(E1000_RLPML, max_frame_size);
2480} 2507}
2481 2508
2482static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn) 2509static inline void igb_set_vmolr(struct igb_adapter *adapter,
2510 int vfn, bool aupe)
2483{ 2511{
2484 struct e1000_hw *hw = &adapter->hw; 2512 struct e1000_hw *hw = &adapter->hw;
2485 u32 vmolr; 2513 u32 vmolr;
@@ -2492,8 +2520,11 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
2492 return; 2520 return;
2493 2521
2494 vmolr = rd32(E1000_VMOLR(vfn)); 2522 vmolr = rd32(E1000_VMOLR(vfn));
2495 vmolr |= E1000_VMOLR_AUPE | /* Accept untagged packets */ 2523 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
2496 E1000_VMOLR_STRVLAN; /* Strip vlan tags */ 2524 if (aupe)
2525 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
2526 else
2527 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
2497 2528
2498 /* clear all bits that might not be set */ 2529 /* clear all bits that might not be set */
2499 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE); 2530 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
@@ -2560,11 +2591,14 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
2560 E1000_SRRCTL_BSIZEPKT_SHIFT; 2591 E1000_SRRCTL_BSIZEPKT_SHIFT;
2561 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; 2592 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2562 } 2593 }
2594 /* Only set Drop Enable if we are supporting multiple queues */
2595 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
2596 srrctl |= E1000_SRRCTL_DROP_EN;
2563 2597
2564 wr32(E1000_SRRCTL(reg_idx), srrctl); 2598 wr32(E1000_SRRCTL(reg_idx), srrctl);
2565 2599
2566 /* set filtering for VMDQ pools */ 2600 /* set filtering for VMDQ pools */
2567 igb_set_vmolr(adapter, reg_idx & 0x7); 2601 igb_set_vmolr(adapter, reg_idx & 0x7, true);
2568 2602
2569 /* enable receive descriptor fetching */ 2603 /* enable receive descriptor fetching */
2570 rxdctl = rd32(E1000_RXDCTL(reg_idx)); 2604 rxdctl = rd32(E1000_RXDCTL(reg_idx));
@@ -2596,7 +2630,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
2596 /* Setup the HW Rx Head and Tail Descriptor Pointers and 2630 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2597 * the Base and Length of the Rx Descriptor Ring */ 2631 * the Base and Length of the Rx Descriptor Ring */
2598 for (i = 0; i < adapter->num_rx_queues; i++) 2632 for (i = 0; i < adapter->num_rx_queues; i++)
2599 igb_configure_rx_ring(adapter, &adapter->rx_ring[i]); 2633 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
2600} 2634}
2601 2635
2602/** 2636/**
@@ -2633,7 +2667,7 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2633 int i; 2667 int i;
2634 2668
2635 for (i = 0; i < adapter->num_tx_queues; i++) 2669 for (i = 0; i < adapter->num_tx_queues; i++)
2636 igb_free_tx_resources(&adapter->tx_ring[i]); 2670 igb_free_tx_resources(adapter->tx_ring[i]);
2637} 2671}
2638 2672
2639void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring, 2673void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
@@ -2700,7 +2734,7 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2700 int i; 2734 int i;
2701 2735
2702 for (i = 0; i < adapter->num_tx_queues; i++) 2736 for (i = 0; i < adapter->num_tx_queues; i++)
2703 igb_clean_tx_ring(&adapter->tx_ring[i]); 2737 igb_clean_tx_ring(adapter->tx_ring[i]);
2704} 2738}
2705 2739
2706/** 2740/**
@@ -2737,7 +2771,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2737 int i; 2771 int i;
2738 2772
2739 for (i = 0; i < adapter->num_rx_queues; i++) 2773 for (i = 0; i < adapter->num_rx_queues; i++)
2740 igb_free_rx_resources(&adapter->rx_ring[i]); 2774 igb_free_rx_resources(adapter->rx_ring[i]);
2741} 2775}
2742 2776
2743/** 2777/**
@@ -2801,7 +2835,7 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2801 int i; 2835 int i;
2802 2836
2803 for (i = 0; i < adapter->num_rx_queues; i++) 2837 for (i = 0; i < adapter->num_rx_queues; i++)
2804 igb_clean_rx_ring(&adapter->rx_ring[i]); 2838 igb_clean_rx_ring(adapter->rx_ring[i]);
2805} 2839}
2806 2840
2807/** 2841/**
@@ -2843,38 +2877,30 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
2843{ 2877{
2844 struct igb_adapter *adapter = netdev_priv(netdev); 2878 struct igb_adapter *adapter = netdev_priv(netdev);
2845 struct e1000_hw *hw = &adapter->hw; 2879 struct e1000_hw *hw = &adapter->hw;
2846 struct dev_mc_list *mc_ptr = netdev->mc_list; 2880 struct dev_mc_list *mc_ptr;
2847 u8 *mta_list; 2881 u8 *mta_list;
2848 u32 vmolr = 0;
2849 int i; 2882 int i;
2850 2883
2851 if (!netdev->mc_count) { 2884 if (netdev_mc_empty(netdev)) {
2852 /* nothing to program, so clear mc list */ 2885 /* nothing to program, so clear mc list */
2853 igb_update_mc_addr_list(hw, NULL, 0); 2886 igb_update_mc_addr_list(hw, NULL, 0);
2854 igb_restore_vf_multicasts(adapter); 2887 igb_restore_vf_multicasts(adapter);
2855 return 0; 2888 return 0;
2856 } 2889 }
2857 2890
2858 mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC); 2891 mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
2859 if (!mta_list) 2892 if (!mta_list)
2860 return -ENOMEM; 2893 return -ENOMEM;
2861 2894
2862 /* set vmolr receive overflow multicast bit */
2863 vmolr |= E1000_VMOLR_ROMPE;
2864
2865 /* The shared function expects a packed array of only addresses. */ 2895 /* The shared function expects a packed array of only addresses. */
2866 mc_ptr = netdev->mc_list; 2896 i = 0;
2897 netdev_for_each_mc_addr(mc_ptr, netdev)
2898 memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2867 2899
2868 for (i = 0; i < netdev->mc_count; i++) {
2869 if (!mc_ptr)
2870 break;
2871 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2872 mc_ptr = mc_ptr->next;
2873 }
2874 igb_update_mc_addr_list(hw, mta_list, i); 2900 igb_update_mc_addr_list(hw, mta_list, i);
2875 kfree(mta_list); 2901 kfree(mta_list);
2876 2902
2877 return netdev->mc_count; 2903 return netdev_mc_count(netdev);
2878} 2904}
2879 2905
2880/** 2906/**
@@ -2895,12 +2921,13 @@ static int igb_write_uc_addr_list(struct net_device *netdev)
2895 int count = 0; 2921 int count = 0;
2896 2922
2897 /* return ENOMEM indicating insufficient memory for addresses */ 2923 /* return ENOMEM indicating insufficient memory for addresses */
2898 if (netdev->uc.count > rar_entries) 2924 if (netdev_uc_count(netdev) > rar_entries)
2899 return -ENOMEM; 2925 return -ENOMEM;
2900 2926
2901 if (netdev->uc.count && rar_entries) { 2927 if (!netdev_uc_empty(netdev) && rar_entries) {
2902 struct netdev_hw_addr *ha; 2928 struct netdev_hw_addr *ha;
2903 list_for_each_entry(ha, &netdev->uc.list, list) { 2929
2930 netdev_for_each_uc_addr(ha, netdev) {
2904 if (!rar_entries) 2931 if (!rar_entries)
2905 break; 2932 break;
2906 igb_rar_set_qsel(adapter, ha->addr, 2933 igb_rar_set_qsel(adapter, ha->addr,
@@ -3004,7 +3031,7 @@ static void igb_update_phy_info(unsigned long data)
3004 * igb_has_link - check shared code for link and determine up/down 3031 * igb_has_link - check shared code for link and determine up/down
3005 * @adapter: pointer to driver private info 3032 * @adapter: pointer to driver private info
3006 **/ 3033 **/
3007static bool igb_has_link(struct igb_adapter *adapter) 3034bool igb_has_link(struct igb_adapter *adapter)
3008{ 3035{
3009 struct e1000_hw *hw = &adapter->hw; 3036 struct e1000_hw *hw = &adapter->hw;
3010 bool link_active = false; 3037 bool link_active = false;
@@ -3121,10 +3148,9 @@ static void igb_watchdog_task(struct work_struct *work)
3121 } 3148 }
3122 3149
3123 igb_update_stats(adapter); 3150 igb_update_stats(adapter);
3124 igb_update_adaptive(hw);
3125 3151
3126 for (i = 0; i < adapter->num_tx_queues; i++) { 3152 for (i = 0; i < adapter->num_tx_queues; i++) {
3127 struct igb_ring *tx_ring = &adapter->tx_ring[i]; 3153 struct igb_ring *tx_ring = adapter->tx_ring[i];
3128 if (!netif_carrier_ok(netdev)) { 3154 if (!netif_carrier_ok(netdev)) {
3129 /* We've lost link, so the controller stops DMA, 3155 /* We've lost link, so the controller stops DMA,
3130 * but we've got queued Tx work that's never going 3156 * but we've got queued Tx work that's never going
@@ -3225,6 +3251,10 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
3225 else 3251 else
3226 new_val = avg_wire_size / 2; 3252 new_val = avg_wire_size / 2;
3227 3253
3254 /* when in itr mode 3 do not exceed 20K ints/sec */
3255 if (adapter->rx_itr_setting == 3 && new_val < 196)
3256 new_val = 196;
3257
3228set_itr_val: 3258set_itr_val:
3229 if (new_val != q_vector->itr_val) { 3259 if (new_val != q_vector->itr_val) {
3230 q_vector->itr_val = new_val; 3260 q_vector->itr_val = new_val;
@@ -3320,13 +3350,13 @@ static void igb_set_itr(struct igb_adapter *adapter)
3320 3350
3321 adapter->rx_itr = igb_update_itr(adapter, 3351 adapter->rx_itr = igb_update_itr(adapter,
3322 adapter->rx_itr, 3352 adapter->rx_itr,
3323 adapter->rx_ring->total_packets, 3353 q_vector->rx_ring->total_packets,
3324 adapter->rx_ring->total_bytes); 3354 q_vector->rx_ring->total_bytes);
3325 3355
3326 adapter->tx_itr = igb_update_itr(adapter, 3356 adapter->tx_itr = igb_update_itr(adapter,
3327 adapter->tx_itr, 3357 adapter->tx_itr,
3328 adapter->tx_ring->total_packets, 3358 q_vector->tx_ring->total_packets,
3329 adapter->tx_ring->total_bytes); 3359 q_vector->tx_ring->total_bytes);
3330 current_itr = max(adapter->rx_itr, adapter->tx_itr); 3360 current_itr = max(adapter->rx_itr, adapter->tx_itr);
3331 3361
3332 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 3362 /* conservative mode (itr 3) eliminates the lowest_latency setting */
@@ -3349,10 +3379,10 @@ static void igb_set_itr(struct igb_adapter *adapter)
3349 } 3379 }
3350 3380
3351set_itr_now: 3381set_itr_now:
3352 adapter->rx_ring->total_bytes = 0; 3382 q_vector->rx_ring->total_bytes = 0;
3353 adapter->rx_ring->total_packets = 0; 3383 q_vector->rx_ring->total_packets = 0;
3354 adapter->tx_ring->total_bytes = 0; 3384 q_vector->tx_ring->total_bytes = 0;
3355 adapter->tx_ring->total_packets = 0; 3385 q_vector->tx_ring->total_packets = 0;
3356 3386
3357 if (new_itr != q_vector->itr_val) { 3387 if (new_itr != q_vector->itr_val) {
3358 /* this attempts to bias the interrupt rate towards Bulk 3388 /* this attempts to bias the interrupt rate towards Bulk
@@ -3392,8 +3422,8 @@ static inline int igb_tso_adv(struct igb_ring *tx_ring,
3392 int err; 3422 int err;
3393 struct igb_buffer *buffer_info; 3423 struct igb_buffer *buffer_info;
3394 u32 info = 0, tu_cmd = 0; 3424 u32 info = 0, tu_cmd = 0;
3395 u32 mss_l4len_idx, l4len; 3425 u32 mss_l4len_idx;
3396 *hdr_len = 0; 3426 u8 l4len;
3397 3427
3398 if (skb_header_cloned(skb)) { 3428 if (skb_header_cloned(skb)) {
3399 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 3429 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
@@ -3599,6 +3629,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
3599 } 3629 }
3600 3630
3601 tx_ring->buffer_info[i].skb = skb; 3631 tx_ring->buffer_info[i].skb = skb;
3632 tx_ring->buffer_info[i].gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
3602 tx_ring->buffer_info[first].next_to_watch = i; 3633 tx_ring->buffer_info[first].next_to_watch = i;
3603 3634
3604 return ++count; 3635 return ++count;
@@ -3612,14 +3643,12 @@ dma_error:
3612 buffer_info->length = 0; 3643 buffer_info->length = 0;
3613 buffer_info->next_to_watch = 0; 3644 buffer_info->next_to_watch = 0;
3614 buffer_info->mapped_as_page = false; 3645 buffer_info->mapped_as_page = false;
3615 count--;
3616 3646
3617 /* clear timestamp and dma mappings for remaining portion of packet */ 3647 /* clear timestamp and dma mappings for remaining portion of packet */
3618 while (count >= 0) { 3648 while (count--) {
3619 count--; 3649 if (i == 0)
3650 i = tx_ring->count;
3620 i--; 3651 i--;
3621 if (i < 0)
3622 i += tx_ring->count;
3623 buffer_info = &tx_ring->buffer_info[i]; 3652 buffer_info = &tx_ring->buffer_info[i];
3624 igb_unmap_and_free_tx_resource(tx_ring, buffer_info); 3653 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
3625 } 3654 }
@@ -3628,7 +3657,7 @@ dma_error:
3628} 3657}
3629 3658
3630static inline void igb_tx_queue_adv(struct igb_ring *tx_ring, 3659static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
3631 int tx_flags, int count, u32 paylen, 3660 u32 tx_flags, int count, u32 paylen,
3632 u8 hdr_len) 3661 u8 hdr_len)
3633{ 3662{
3634 union e1000_adv_tx_desc *tx_desc; 3663 union e1000_adv_tx_desc *tx_desc;
@@ -3716,7 +3745,7 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
3716 return 0; 3745 return 0;
3717} 3746}
3718 3747
3719static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size) 3748static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
3720{ 3749{
3721 if (igb_desc_unused(tx_ring) >= size) 3750 if (igb_desc_unused(tx_ring) >= size)
3722 return 0; 3751 return 0;
@@ -3727,10 +3756,10 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3727 struct igb_ring *tx_ring) 3756 struct igb_ring *tx_ring)
3728{ 3757{
3729 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); 3758 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
3730 unsigned int first;
3731 unsigned int tx_flags = 0;
3732 u8 hdr_len = 0;
3733 int tso = 0, count; 3759 int tso = 0, count;
3760 u32 tx_flags = 0;
3761 u16 first;
3762 u8 hdr_len = 0;
3734 union skb_shared_tx *shtx = skb_tx(skb); 3763 union skb_shared_tx *shtx = skb_tx(skb);
3735 3764
3736 /* need: 1 descriptor per page, 3765 /* need: 1 descriptor per page,
@@ -3911,7 +3940,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3911 netdev->mtu = new_mtu; 3940 netdev->mtu = new_mtu;
3912 3941
3913 for (i = 0; i < adapter->num_rx_queues; i++) 3942 for (i = 0; i < adapter->num_rx_queues; i++)
3914 adapter->rx_ring[i].rx_buffer_len = rx_buffer_len; 3943 adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;
3915 3944
3916 if (netif_running(netdev)) 3945 if (netif_running(netdev))
3917 igb_up(adapter); 3946 igb_up(adapter);
@@ -3933,7 +3962,7 @@ void igb_update_stats(struct igb_adapter *adapter)
3933 struct net_device_stats *net_stats = igb_get_stats(adapter->netdev); 3962 struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
3934 struct e1000_hw *hw = &adapter->hw; 3963 struct e1000_hw *hw = &adapter->hw;
3935 struct pci_dev *pdev = adapter->pdev; 3964 struct pci_dev *pdev = adapter->pdev;
3936 u32 rnbc; 3965 u32 rnbc, reg;
3937 u16 phy_tmp; 3966 u16 phy_tmp;
3938 int i; 3967 int i;
3939 u64 bytes, packets; 3968 u64 bytes, packets;
@@ -3953,10 +3982,11 @@ void igb_update_stats(struct igb_adapter *adapter)
3953 packets = 0; 3982 packets = 0;
3954 for (i = 0; i < adapter->num_rx_queues; i++) { 3983 for (i = 0; i < adapter->num_rx_queues; i++) {
3955 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF; 3984 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
3956 adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp; 3985 struct igb_ring *ring = adapter->rx_ring[i];
3986 ring->rx_stats.drops += rqdpc_tmp;
3957 net_stats->rx_fifo_errors += rqdpc_tmp; 3987 net_stats->rx_fifo_errors += rqdpc_tmp;
3958 bytes += adapter->rx_ring[i].rx_stats.bytes; 3988 bytes += ring->rx_stats.bytes;
3959 packets += adapter->rx_ring[i].rx_stats.packets; 3989 packets += ring->rx_stats.packets;
3960 } 3990 }
3961 3991
3962 net_stats->rx_bytes = bytes; 3992 net_stats->rx_bytes = bytes;
@@ -3965,8 +3995,9 @@ void igb_update_stats(struct igb_adapter *adapter)
3965 bytes = 0; 3995 bytes = 0;
3966 packets = 0; 3996 packets = 0;
3967 for (i = 0; i < adapter->num_tx_queues; i++) { 3997 for (i = 0; i < adapter->num_tx_queues; i++) {
3968 bytes += adapter->tx_ring[i].tx_stats.bytes; 3998 struct igb_ring *ring = adapter->tx_ring[i];
3969 packets += adapter->tx_ring[i].tx_stats.packets; 3999 bytes += ring->tx_stats.bytes;
4000 packets += ring->tx_stats.packets;
3970 } 4001 }
3971 net_stats->tx_bytes = bytes; 4002 net_stats->tx_bytes = bytes;
3972 net_stats->tx_packets = packets; 4003 net_stats->tx_packets = packets;
@@ -4024,15 +4055,17 @@ void igb_update_stats(struct igb_adapter *adapter)
4024 adapter->stats.mptc += rd32(E1000_MPTC); 4055 adapter->stats.mptc += rd32(E1000_MPTC);
4025 adapter->stats.bptc += rd32(E1000_BPTC); 4056 adapter->stats.bptc += rd32(E1000_BPTC);
4026 4057
4027 /* used for adaptive IFS */ 4058 adapter->stats.tpt += rd32(E1000_TPT);
4028 hw->mac.tx_packet_delta = rd32(E1000_TPT); 4059 adapter->stats.colc += rd32(E1000_COLC);
4029 adapter->stats.tpt += hw->mac.tx_packet_delta;
4030 hw->mac.collision_delta = rd32(E1000_COLC);
4031 adapter->stats.colc += hw->mac.collision_delta;
4032 4060
4033 adapter->stats.algnerrc += rd32(E1000_ALGNERRC); 4061 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
4034 adapter->stats.rxerrc += rd32(E1000_RXERRC); 4062 /* read internal phy specific stats */
4035 adapter->stats.tncrs += rd32(E1000_TNCRS); 4063 reg = rd32(E1000_CTRL_EXT);
4064 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4065 adapter->stats.rxerrc += rd32(E1000_RXERRC);
4066 adapter->stats.tncrs += rd32(E1000_TNCRS);
4067 }
4068
4036 adapter->stats.tsctc += rd32(E1000_TSCTC); 4069 adapter->stats.tsctc += rd32(E1000_TSCTC);
4037 adapter->stats.tsctfc += rd32(E1000_TSCTFC); 4070 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4038 4071
@@ -4095,6 +4128,9 @@ static irqreturn_t igb_msix_other(int irq, void *data)
4095 u32 icr = rd32(E1000_ICR); 4128 u32 icr = rd32(E1000_ICR);
4096 /* reading ICR causes bit 31 of EICR to be cleared */ 4129 /* reading ICR causes bit 31 of EICR to be cleared */
4097 4130
4131 if (icr & E1000_ICR_DRSTA)
4132 schedule_work(&adapter->reset_task);
4133
4098 if (icr & E1000_ICR_DOUTSYNC) { 4134 if (icr & E1000_ICR_DOUTSYNC) {
4099 /* HW is reporting DMA is out of sync */ 4135 /* HW is reporting DMA is out of sync */
4100 adapter->stats.doosync++; 4136 adapter->stats.doosync++;
@@ -4124,6 +4160,7 @@ static irqreturn_t igb_msix_other(int irq, void *data)
4124 4160
4125static void igb_write_itr(struct igb_q_vector *q_vector) 4161static void igb_write_itr(struct igb_q_vector *q_vector)
4126{ 4162{
4163 struct igb_adapter *adapter = q_vector->adapter;
4127 u32 itr_val = q_vector->itr_val & 0x7FFC; 4164 u32 itr_val = q_vector->itr_val & 0x7FFC;
4128 4165
4129 if (!q_vector->set_itr) 4166 if (!q_vector->set_itr)
@@ -4132,8 +4169,8 @@ static void igb_write_itr(struct igb_q_vector *q_vector)
4132 if (!itr_val) 4169 if (!itr_val)
4133 itr_val = 0x4; 4170 itr_val = 0x4;
4134 4171
4135 if (q_vector->itr_shift) 4172 if (adapter->hw.mac.type == e1000_82575)
4136 itr_val |= itr_val << q_vector->itr_shift; 4173 itr_val |= itr_val << 16;
4137 else 4174 else
4138 itr_val |= 0x8000000; 4175 itr_val |= 0x8000000;
4139 4176
@@ -4210,9 +4247,8 @@ static void igb_setup_dca(struct igb_adapter *adapter)
4210 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); 4247 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4211 4248
4212 for (i = 0; i < adapter->num_q_vectors; i++) { 4249 for (i = 0; i < adapter->num_q_vectors; i++) {
4213 struct igb_q_vector *q_vector = adapter->q_vector[i]; 4250 adapter->q_vector[i]->cpu = -1;
4214 q_vector->cpu = -1; 4251 igb_update_dca(adapter->q_vector[i]);
4215 igb_update_dca(q_vector);
4216 } 4252 }
4217} 4253}
4218 4254
@@ -4486,10 +4522,57 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4486 reg |= size; 4522 reg |= size;
4487 wr32(E1000_VMOLR(vf), reg); 4523 wr32(E1000_VMOLR(vf), reg);
4488 } 4524 }
4489 return 0;
4490 } 4525 }
4491 } 4526 }
4492 return -1; 4527 return 0;
4528}
4529
4530static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
4531{
4532 struct e1000_hw *hw = &adapter->hw;
4533
4534 if (vid)
4535 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
4536 else
4537 wr32(E1000_VMVIR(vf), 0);
4538}
4539
4540static int igb_ndo_set_vf_vlan(struct net_device *netdev,
4541 int vf, u16 vlan, u8 qos)
4542{
4543 int err = 0;
4544 struct igb_adapter *adapter = netdev_priv(netdev);
4545
4546 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
4547 return -EINVAL;
4548 if (vlan || qos) {
4549 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
4550 if (err)
4551 goto out;
4552 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
4553 igb_set_vmolr(adapter, vf, !vlan);
4554 adapter->vf_data[vf].pf_vlan = vlan;
4555 adapter->vf_data[vf].pf_qos = qos;
4556 dev_info(&adapter->pdev->dev,
4557 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
4558 if (test_bit(__IGB_DOWN, &adapter->state)) {
4559 dev_warn(&adapter->pdev->dev,
4560 "The VF VLAN has been set,"
4561 " but the PF device is not up.\n");
4562 dev_warn(&adapter->pdev->dev,
4563 "Bring the PF device up before"
4564 " attempting to use the VF device.\n");
4565 }
4566 } else {
4567 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
4568 false, vf);
4569 igb_set_vmvir(adapter, vlan, vf);
4570 igb_set_vmolr(adapter, vf, true);
4571 adapter->vf_data[vf].pf_vlan = 0;
4572 adapter->vf_data[vf].pf_qos = 0;
4573 }
4574out:
4575 return err;
4493} 4576}
4494 4577
4495static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) 4578static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
@@ -4502,15 +4585,21 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4502 4585
4503static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) 4586static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
4504{ 4587{
4505 /* clear all flags */ 4588 /* clear flags */
4506 adapter->vf_data[vf].flags = 0; 4589 adapter->vf_data[vf].flags &= ~(IGB_VF_FLAG_PF_SET_MAC);
4507 adapter->vf_data[vf].last_nack = jiffies; 4590 adapter->vf_data[vf].last_nack = jiffies;
4508 4591
4509 /* reset offloads to defaults */ 4592 /* reset offloads to defaults */
4510 igb_set_vmolr(adapter, vf); 4593 igb_set_vmolr(adapter, vf, true);
4511 4594
4512 /* reset vlans for device */ 4595 /* reset vlans for device */
4513 igb_clear_vf_vfta(adapter, vf); 4596 igb_clear_vf_vfta(adapter, vf);
4597 if (adapter->vf_data[vf].pf_vlan)
4598 igb_ndo_set_vf_vlan(adapter->netdev, vf,
4599 adapter->vf_data[vf].pf_vlan,
4600 adapter->vf_data[vf].pf_qos);
4601 else
4602 igb_clear_vf_vfta(adapter, vf);
4514 4603
4515 /* reset multicast table array for vf */ 4604 /* reset multicast table array for vf */
4516 adapter->vf_data[vf].num_vf_mc_hashes = 0; 4605 adapter->vf_data[vf].num_vf_mc_hashes = 0;
@@ -4524,7 +4613,8 @@ static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
4524 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; 4613 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
4525 4614
4526 /* generate a new mac address as we were hotplug removed/added */ 4615 /* generate a new mac address as we were hotplug removed/added */
4527 random_ether_addr(vf_mac); 4616 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
4617 random_ether_addr(vf_mac);
4528 4618
4529 /* process remaining reset events */ 4619 /* process remaining reset events */
4530 igb_vf_reset(adapter, vf); 4620 igb_vf_reset(adapter, vf);
@@ -4637,7 +4727,10 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4637 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf); 4727 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
4638 break; 4728 break;
4639 case E1000_VF_SET_VLAN: 4729 case E1000_VF_SET_VLAN:
4640 retval = igb_set_vf_vlan(adapter, msgbuf, vf); 4730 if (adapter->vf_data[vf].pf_vlan)
4731 retval = -1;
4732 else
4733 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
4641 break; 4734 break;
4642 default: 4735 default:
4643 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); 4736 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
@@ -4718,6 +4811,9 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
4718 4811
4719 igb_write_itr(q_vector); 4812 igb_write_itr(q_vector);
4720 4813
4814 if (icr & E1000_ICR_DRSTA)
4815 schedule_work(&adapter->reset_task);
4816
4721 if (icr & E1000_ICR_DOUTSYNC) { 4817 if (icr & E1000_ICR_DOUTSYNC) {
4722 /* HW is reporting DMA is out of sync */ 4818 /* HW is reporting DMA is out of sync */
4723 adapter->stats.doosync++; 4819 adapter->stats.doosync++;
@@ -4757,6 +4853,9 @@ static irqreturn_t igb_intr(int irq, void *data)
4757 if (!(icr & E1000_ICR_INT_ASSERTED)) 4853 if (!(icr & E1000_ICR_INT_ASSERTED))
4758 return IRQ_NONE; 4854 return IRQ_NONE;
4759 4855
4856 if (icr & E1000_ICR_DRSTA)
4857 schedule_work(&adapter->reset_task);
4858
4760 if (icr & E1000_ICR_DOUTSYNC) { 4859 if (icr & E1000_ICR_DOUTSYNC) {
4761 /* HW is reporting DMA is out of sync */ 4860 /* HW is reporting DMA is out of sync */
4762 adapter->stats.doosync++; 4861 adapter->stats.doosync++;
@@ -4920,7 +5019,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
4920 if (skb) { 5019 if (skb) {
4921 unsigned int segs, bytecount; 5020 unsigned int segs, bytecount;
4922 /* gso_segs is currently only valid for tcp */ 5021 /* gso_segs is currently only valid for tcp */
4923 segs = skb_shinfo(skb)->gso_segs ?: 1; 5022 segs = buffer_info->gso_segs;
4924 /* multiply data chunks by size of headers */ 5023 /* multiply data chunks by size of headers */
4925 bytecount = ((segs - 1) * skb_headlen(skb)) + 5024 bytecount = ((segs - 1) * skb_headlen(skb)) +
4926 skb->len; 5025 skb->len;
@@ -5738,7 +5837,9 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
5738 5837
5739 *enable_wake = wufc || adapter->en_mng_pt; 5838 *enable_wake = wufc || adapter->en_mng_pt;
5740 if (!*enable_wake) 5839 if (!*enable_wake)
5741 igb_shutdown_serdes_link_82575(hw); 5840 igb_power_down_link(adapter);
5841 else
5842 igb_power_up_link(adapter);
5742 5843
5743 /* Release control of h/w to f/w. If f/w is AMT enabled, this 5844 /* Release control of h/w to f/w. If f/w is AMT enabled, this
5744 * would have already happened in close and is redundant. */ 5845 * would have already happened in close and is redundant. */
@@ -5778,6 +5879,7 @@ static int igb_resume(struct pci_dev *pdev)
5778 5879
5779 pci_set_power_state(pdev, PCI_D0); 5880 pci_set_power_state(pdev, PCI_D0);
5780 pci_restore_state(pdev); 5881 pci_restore_state(pdev);
5882 pci_save_state(pdev);
5781 5883
5782 err = pci_enable_device_mem(pdev); 5884 err = pci_enable_device_mem(pdev);
5783 if (err) { 5885 if (err) {
@@ -5795,8 +5897,6 @@ static int igb_resume(struct pci_dev *pdev)
5795 return -ENOMEM; 5897 return -ENOMEM;
5796 } 5898 }
5797 5899
5798 /* e1000_power_up_phy(adapter); */
5799
5800 igb_reset(adapter); 5900 igb_reset(adapter);
5801 5901
5802 /* let the f/w know that the h/w is now under the control of the 5902 /* let the f/w know that the h/w is now under the control of the
@@ -5905,6 +6005,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
5905 } else { 6005 } else {
5906 pci_set_master(pdev); 6006 pci_set_master(pdev);
5907 pci_restore_state(pdev); 6007 pci_restore_state(pdev);
6008 pci_save_state(pdev);
5908 6009
5909 pci_enable_wake(pdev, PCI_D3hot, 0); 6010 pci_enable_wake(pdev, PCI_D3hot, 0);
5910 pci_enable_wake(pdev, PCI_D3cold, 0); 6011 pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -5993,6 +6094,43 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
5993 return 0; 6094 return 0;
5994} 6095}
5995 6096
6097static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
6098{
6099 struct igb_adapter *adapter = netdev_priv(netdev);
6100 if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
6101 return -EINVAL;
6102 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
6103 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
6104 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
6105 " change effective.");
6106 if (test_bit(__IGB_DOWN, &adapter->state)) {
6107 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
6108 " but the PF device is not up.\n");
6109 dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
6110 " attempting to use the VF device.\n");
6111 }
6112 return igb_set_vf_mac(adapter, vf, mac);
6113}
6114
6115static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
6116{
6117 return -EOPNOTSUPP;
6118}
6119
6120static int igb_ndo_get_vf_config(struct net_device *netdev,
6121 int vf, struct ifla_vf_info *ivi)
6122{
6123 struct igb_adapter *adapter = netdev_priv(netdev);
6124 if (vf >= adapter->vfs_allocated_count)
6125 return -EINVAL;
6126 ivi->vf = vf;
6127 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
6128 ivi->tx_rate = 0;
6129 ivi->vlan = adapter->vf_data[vf].pf_vlan;
6130 ivi->qos = adapter->vf_data[vf].pf_qos;
6131 return 0;
6132}
6133
5996static void igb_vmm_control(struct igb_adapter *adapter) 6134static void igb_vmm_control(struct igb_adapter *adapter)
5997{ 6135{
5998 struct e1000_hw *hw = &adapter->hw; 6136 struct e1000_hw *hw = &adapter->hw;
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 2aa71a766c35..a77afd8a14bb 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1403,8 +1403,8 @@ static void igbvf_set_multi(struct net_device *netdev)
1403 u8 *mta_list = NULL; 1403 u8 *mta_list = NULL;
1404 int i; 1404 int i;
1405 1405
1406 if (netdev->mc_count) { 1406 if (!netdev_mc_empty(netdev)) {
1407 mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC); 1407 mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
1408 if (!mta_list) { 1408 if (!mta_list) {
1409 dev_err(&adapter->pdev->dev, 1409 dev_err(&adapter->pdev->dev,
1410 "failed to allocate multicast filter list\n"); 1410 "failed to allocate multicast filter list\n");
@@ -1413,15 +1413,9 @@ static void igbvf_set_multi(struct net_device *netdev)
1413 } 1413 }
1414 1414
1415 /* prepare a packed array of only addresses. */ 1415 /* prepare a packed array of only addresses. */
1416 mc_ptr = netdev->mc_list; 1416 i = 0;
1417 1417 netdev_for_each_mc_addr(mc_ptr, netdev)
1418 for (i = 0; i < netdev->mc_count; i++) { 1418 memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
1419 if (!mc_ptr)
1420 break;
1421 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
1422 ETH_ALEN);
1423 mc_ptr = mc_ptr->next;
1424 }
1425 1419
1426 hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0); 1420 hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
1427 kfree(mta_list); 1421 kfree(mta_list);
@@ -2609,11 +2603,7 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter)
2609 struct pci_dev *pdev = adapter->pdev; 2603 struct pci_dev *pdev = adapter->pdev;
2610 2604
2611 dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n"); 2605 dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
2612 dev_info(&pdev->dev, "Address: %02x:%02x:%02x:%02x:%02x:%02x\n", 2606 dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
2613 /* MAC address */
2614 netdev->dev_addr[0], netdev->dev_addr[1],
2615 netdev->dev_addr[2], netdev->dev_addr[3],
2616 netdev->dev_addr[4], netdev->dev_addr[5]);
2617 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type); 2607 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
2618} 2608}
2619 2609
@@ -2779,11 +2769,8 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
2779 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 2769 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
2780 2770
2781 if (!is_valid_ether_addr(netdev->perm_addr)) { 2771 if (!is_valid_ether_addr(netdev->perm_addr)) {
2782 dev_err(&pdev->dev, "Invalid MAC Address: " 2772 dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
2783 "%02x:%02x:%02x:%02x:%02x:%02x\n", 2773 netdev->dev_addr);
2784 netdev->dev_addr[0], netdev->dev_addr[1],
2785 netdev->dev_addr[2], netdev->dev_addr[3],
2786 netdev->dev_addr[4], netdev->dev_addr[5]);
2787 err = -EIO; 2774 err = -EIO;
2788 goto err_hw_init; 2775 goto err_hw_init;
2789 } 2776 }
@@ -2885,7 +2872,7 @@ static struct pci_error_handlers igbvf_err_handler = {
2885 .resume = igbvf_io_resume, 2872 .resume = igbvf_io_resume,
2886}; 2873};
2887 2874
2888static struct pci_device_id igbvf_pci_tbl[] = { 2875static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
2889 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf }, 2876 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
2890 { } /* terminate list */ 2877 { } /* terminate list */
2891}; 2878};
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 8ec15ab8c8c2..70871b9b045a 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -1383,7 +1383,7 @@ static void __devexit ioc3_remove_one (struct pci_dev *pdev)
1383 */ 1383 */
1384} 1384}
1385 1385
1386static struct pci_device_id ioc3_pci_tbl[] = { 1386static DEFINE_PCI_DEVICE_TABLE(ioc3_pci_tbl) = {
1387 { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID }, 1387 { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
1388 { 0 } 1388 { 0 }
1389}; 1389};
@@ -1664,11 +1664,10 @@ static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1664 1664
1665static void ioc3_set_multicast_list(struct net_device *dev) 1665static void ioc3_set_multicast_list(struct net_device *dev)
1666{ 1666{
1667 struct dev_mc_list *dmi = dev->mc_list; 1667 struct dev_mc_list *dmi;
1668 struct ioc3_private *ip = netdev_priv(dev); 1668 struct ioc3_private *ip = netdev_priv(dev);
1669 struct ioc3 *ioc3 = ip->regs; 1669 struct ioc3 *ioc3 = ip->regs;
1670 u64 ehar = 0; 1670 u64 ehar = 0;
1671 int i;
1672 1671
1673 netif_stop_queue(dev); /* Lock out others. */ 1672 netif_stop_queue(dev); /* Lock out others. */
1674 1673
@@ -1681,16 +1680,16 @@ static void ioc3_set_multicast_list(struct net_device *dev)
1681 ioc3_w_emcr(ip->emcr); /* Clear promiscuous. */ 1680 ioc3_w_emcr(ip->emcr); /* Clear promiscuous. */
1682 (void) ioc3_r_emcr(); 1681 (void) ioc3_r_emcr();
1683 1682
1684 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) { 1683 if ((dev->flags & IFF_ALLMULTI) ||
1684 (netdev_mc_count(dev) > 64)) {
1685 /* Too many for hashing to make sense or we want all 1685 /* Too many for hashing to make sense or we want all
1686 multicast packets anyway, so skip computing all the 1686 multicast packets anyway, so skip computing all the
1687 hashes and just accept all packets. */ 1687 hashes and just accept all packets. */
1688 ip->ehar_h = 0xffffffff; 1688 ip->ehar_h = 0xffffffff;
1689 ip->ehar_l = 0xffffffff; 1689 ip->ehar_l = 0xffffffff;
1690 } else { 1690 } else {
1691 for (i = 0; i < dev->mc_count; i++) { 1691 netdev_for_each_mc_addr(dmi, dev) {
1692 char *addr = dmi->dmi_addr; 1692 char *addr = dmi->dmi_addr;
1693 dmi = dmi->next;
1694 1693
1695 if (!(*addr & 1)) 1694 if (!(*addr & 1))
1696 continue; 1695 continue;
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index ba8d246d05a0..150415e83f61 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -88,17 +88,15 @@ static const char *ipg_brand_name[] = {
88 "Sundance Technology ST2021 based NIC", 88 "Sundance Technology ST2021 based NIC",
89 "Tamarack Microelectronics TC9020/9021 based NIC", 89 "Tamarack Microelectronics TC9020/9021 based NIC",
90 "Tamarack Microelectronics TC9020/9021 based NIC", 90 "Tamarack Microelectronics TC9020/9021 based NIC",
91 "D-Link NIC",
92 "D-Link NIC IP1000A" 91 "D-Link NIC IP1000A"
93}; 92};
94 93
95static struct pci_device_id ipg_pci_tbl[] __devinitdata = { 94static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
96 { PCI_VDEVICE(SUNDANCE, 0x1023), 0 }, 95 { PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
97 { PCI_VDEVICE(SUNDANCE, 0x2021), 1 }, 96 { PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
98 { PCI_VDEVICE(SUNDANCE, 0x1021), 2 }, 97 { PCI_VDEVICE(SUNDANCE, 0x1021), 2 },
99 { PCI_VDEVICE(DLINK, 0x9021), 3 }, 98 { PCI_VDEVICE(DLINK, 0x9021), 3 },
100 { PCI_VDEVICE(DLINK, 0x4000), 4 }, 99 { PCI_VDEVICE(DLINK, 0x4020), 4 },
101 { PCI_VDEVICE(DLINK, 0x4020), 5 },
102 { 0, } 100 { 0, }
103}; 101};
104 102
@@ -585,11 +583,11 @@ static void ipg_nic_set_multicast_list(struct net_device *dev)
585 receivemode = IPG_RM_RECEIVEALLFRAMES; 583 receivemode = IPG_RM_RECEIVEALLFRAMES;
586 } else if ((dev->flags & IFF_ALLMULTI) || 584 } else if ((dev->flags & IFF_ALLMULTI) ||
587 ((dev->flags & IFF_MULTICAST) && 585 ((dev->flags & IFF_MULTICAST) &&
588 (dev->mc_count > IPG_MULTICAST_HASHTABLE_SIZE))) { 586 (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) {
589 /* NIC to be configured to receive all multicast 587 /* NIC to be configured to receive all multicast
590 * frames. */ 588 * frames. */
591 receivemode |= IPG_RM_RECEIVEMULTICAST; 589 receivemode |= IPG_RM_RECEIVEMULTICAST;
592 } else if ((dev->flags & IFF_MULTICAST) && (dev->mc_count > 0)) { 590 } else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
593 /* NIC to be configured to receive selected 591 /* NIC to be configured to receive selected
594 * multicast addresses. */ 592 * multicast addresses. */
595 receivemode |= IPG_RM_RECEIVEMULTICASTHASH; 593 receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
@@ -610,8 +608,7 @@ static void ipg_nic_set_multicast_list(struct net_device *dev)
610 hashtable[1] = 0x00000000; 608 hashtable[1] = 0x00000000;
611 609
612 /* Cycle through all multicast addresses to filter. */ 610 /* Cycle through all multicast addresses to filter. */
613 for (mc_list_ptr = dev->mc_list; 611 netdev_for_each_mc_addr(mc_list_ptr, dev) {
614 mc_list_ptr != NULL; mc_list_ptr = mc_list_ptr->next) {
615 /* Calculate CRC result for each multicast address. */ 612 /* Calculate CRC result for each multicast address. */
616 hashindex = crc32_le(0xffffffff, mc_list_ptr->dmi_addr, 613 hashindex = crc32_le(0xffffffff, mc_list_ptr->dmi_addr,
617 ETH_ALEN); 614 ETH_ALEN);
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index f76384221422..af10e97345ce 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -64,6 +64,16 @@ endchoice
64 64
65comment "Dongle support" 65comment "Dongle support"
66 66
67config SH_SIR
68 tristate "SuperH SIR on UART"
69 depends on IRDA && SUPERH && \
70 (CPU_SUBTYPE_SH7722 || CPU_SUBTYPE_SH7723 || \
71 CPU_SUBTYPE_SH7724)
72 default n
73 help
74 Say Y here if your want to enable SIR function on SuperH UART
75 devices.
76
67config DONGLE 77config DONGLE
68 bool "Serial dongle support" 78 bool "Serial dongle support"
69 depends on IRTTY_SIR 79 depends on IRTTY_SIR
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index d82e1e3bd8c8..e030d47e2793 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_AU1000_FIR) += au1k_ir.o
22# SIR drivers 22# SIR drivers
23obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o 23obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o
24obj-$(CONFIG_BFIN_SIR) += bfin_sir.o 24obj-$(CONFIG_BFIN_SIR) += bfin_sir.o
25obj-$(CONFIG_SH_SIR) += sh_sir.o
25# dongle drivers for SIR drivers 26# dongle drivers for SIR drivers
26obj-$(CONFIG_ESI_DONGLE) += esi-sir.o 27obj-$(CONFIG_ESI_DONGLE) += esi-sir.o
27obj-$(CONFIG_TEKRAM_DONGLE) += tekram-sir.o 28obj-$(CONFIG_TEKRAM_DONGLE) += tekram-sir.o
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 2d7b5c1d5572..b7e6625ca75e 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -184,7 +184,7 @@
184#define CONFIG0H_DMA_ON_NORX CONFIG0H_DMA_OFF| OBOE_CONFIG0H_ENDMAC 184#define CONFIG0H_DMA_ON_NORX CONFIG0H_DMA_OFF| OBOE_CONFIG0H_ENDMAC
185#define CONFIG0H_DMA_ON CONFIG0H_DMA_ON_NORX | OBOE_CONFIG0H_ENRX 185#define CONFIG0H_DMA_ON CONFIG0H_DMA_ON_NORX | OBOE_CONFIG0H_ENRX
186 186
187static struct pci_device_id toshoboe_pci_tbl[] = { 187static DEFINE_PCI_DEVICE_TABLE(toshoboe_pci_tbl) = {
188 { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIR701, PCI_ANY_ID, PCI_ANY_ID, }, 188 { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIR701, PCI_ANY_ID, PCI_ANY_ID, },
189 { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIRD01, PCI_ANY_ID, PCI_ANY_ID, }, 189 { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIRD01, PCI_ANY_ID, PCI_ANY_ID, },
190 { } /* Terminating entry */ 190 { } /* Terminating entry */
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
new file mode 100644
index 000000000000..d7c983dc91ad
--- /dev/null
+++ b/drivers/net/irda/sh_sir.c
@@ -0,0 +1,823 @@
1/*
2 * SuperH IrDA Driver
3 *
4 * Copyright (C) 2009 Renesas Solutions Corp.
5 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
6 *
7 * Based on bfin_sir.c
8 * Copyright 2006-2009 Analog Devices Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <net/irda/wrapper.h>
18#include <net/irda/irda_device.h>
19#include <asm/clock.h>
20
21#define DRIVER_NAME "sh_sir"
22
23#define RX_PHASE (1 << 0)
24#define TX_PHASE (1 << 1)
25#define TX_COMP_PHASE (1 << 2) /* tx complete */
26#define NONE_PHASE (1 << 31)
27
28#define IRIF_RINTCLR 0x0016 /* DMA rx interrupt source clear */
29#define IRIF_TINTCLR 0x0018 /* DMA tx interrupt source clear */
30#define IRIF_SIR0 0x0020 /* IrDA-SIR10 control */
31#define IRIF_SIR1 0x0022 /* IrDA-SIR10 baudrate error correction */
32#define IRIF_SIR2 0x0024 /* IrDA-SIR10 baudrate count */
33#define IRIF_SIR3 0x0026 /* IrDA-SIR10 status */
34#define IRIF_SIR_FRM 0x0028 /* Hardware frame processing set */
35#define IRIF_SIR_EOF 0x002A /* EOF value */
36#define IRIF_SIR_FLG 0x002C /* Flag clear */
37#define IRIF_UART_STS2 0x002E /* UART status 2 */
38#define IRIF_UART0 0x0030 /* UART control */
39#define IRIF_UART1 0x0032 /* UART status */
40#define IRIF_UART2 0x0034 /* UART mode */
41#define IRIF_UART3 0x0036 /* UART transmit data */
42#define IRIF_UART4 0x0038 /* UART receive data */
43#define IRIF_UART5 0x003A /* UART interrupt mask */
44#define IRIF_UART6 0x003C /* UART baud rate error correction */
45#define IRIF_UART7 0x003E /* UART baud rate count set */
46#define IRIF_CRC0 0x0040 /* CRC engine control */
47#define IRIF_CRC1 0x0042 /* CRC engine input data */
48#define IRIF_CRC2 0x0044 /* CRC engine calculation */
49#define IRIF_CRC3 0x0046 /* CRC engine output data 1 */
50#define IRIF_CRC4 0x0048 /* CRC engine output data 2 */
51
52/* IRIF_SIR0 */
53#define IRTPW (1 << 1) /* transmit pulse width select */
54#define IRERRC (1 << 0) /* Clear receive pulse width error */
55
56/* IRIF_SIR3 */
57#define IRERR (1 << 0) /* received pulse width Error */
58
59/* IRIF_SIR_FRM */
60#define EOFD (1 << 9) /* EOF detection flag */
61#define FRER (1 << 8) /* Frame Error bit */
62#define FRP (1 << 0) /* Frame processing set */
63
64/* IRIF_UART_STS2 */
65#define IRSME (1 << 6) /* Receive Sum Error flag */
66#define IROVE (1 << 5) /* Receive Overrun Error flag */
67#define IRFRE (1 << 4) /* Receive Framing Error flag */
68#define IRPRE (1 << 3) /* Receive Parity Error flag */
69
70/* IRIF_UART0_*/
71#define TBEC (1 << 2) /* Transmit Data Clear */
72#define RIE (1 << 1) /* Receive Enable */
73#define TIE (1 << 0) /* Transmit Enable */
74
75/* IRIF_UART1 */
76#define URSME (1 << 6) /* Receive Sum Error Flag */
77#define UROVE (1 << 5) /* Receive Overrun Error Flag */
78#define URFRE (1 << 4) /* Receive Framing Error Flag */
79#define URPRE (1 << 3) /* Receive Parity Error Flag */
80#define RBF (1 << 2) /* Receive Buffer Full Flag */
81#define TSBE (1 << 1) /* Transmit Shift Buffer Empty Flag */
82#define TBE (1 << 0) /* Transmit Buffer Empty flag */
83#define TBCOMP (TSBE | TBE)
84
85/* IRIF_UART5 */
86#define RSEIM (1 << 6) /* Receive Sum Error Flag IRQ Mask */
87#define RBFIM (1 << 2) /* Receive Buffer Full Flag IRQ Mask */
88#define TSBEIM (1 << 1) /* Transmit Shift Buffer Empty Flag IRQ Mask */
89#define TBEIM (1 << 0) /* Transmit Buffer Empty Flag IRQ Mask */
90#define RX_MASK (RSEIM | RBFIM)
91
92/* IRIF_CRC0 */
93#define CRC_RST (1 << 15) /* CRC Engine Reset */
94#define CRC_CT_MASK 0x0FFF
95
96/************************************************************************
97
98
99 structure
100
101
102************************************************************************/
103struct sh_sir_self {
104 void __iomem *membase;
105 unsigned int irq;
106 struct clk *clk;
107
108 struct net_device *ndev;
109
110 struct irlap_cb *irlap;
111 struct qos_info qos;
112
113 iobuff_t tx_buff;
114 iobuff_t rx_buff;
115};
116
117/************************************************************************
118
119
120 common function
121
122
123************************************************************************/
124static void sh_sir_write(struct sh_sir_self *self, u32 offset, u16 data)
125{
126 iowrite16(data, self->membase + offset);
127}
128
129static u16 sh_sir_read(struct sh_sir_self *self, u32 offset)
130{
131 return ioread16(self->membase + offset);
132}
133
134static void sh_sir_update_bits(struct sh_sir_self *self, u32 offset,
135 u16 mask, u16 data)
136{
137 u16 old, new;
138
139 old = sh_sir_read(self, offset);
140 new = (old & ~mask) | data;
141 if (old != new)
142 sh_sir_write(self, offset, new);
143}
144
145/************************************************************************
146
147
148 CRC function
149
150
151************************************************************************/
152static void sh_sir_crc_reset(struct sh_sir_self *self)
153{
154 sh_sir_write(self, IRIF_CRC0, CRC_RST);
155}
156
157static void sh_sir_crc_add(struct sh_sir_self *self, u8 data)
158{
159 sh_sir_write(self, IRIF_CRC1, (u16)data);
160}
161
162static u16 sh_sir_crc_cnt(struct sh_sir_self *self)
163{
164 return CRC_CT_MASK & sh_sir_read(self, IRIF_CRC0);
165}
166
167static u16 sh_sir_crc_out(struct sh_sir_self *self)
168{
169 return sh_sir_read(self, IRIF_CRC4);
170}
171
172static int sh_sir_crc_init(struct sh_sir_self *self)
173{
174 struct device *dev = &self->ndev->dev;
175 int ret = -EIO;
176 u16 val;
177
178 sh_sir_crc_reset(self);
179
180 sh_sir_crc_add(self, 0xCC);
181 sh_sir_crc_add(self, 0xF5);
182 sh_sir_crc_add(self, 0xF1);
183 sh_sir_crc_add(self, 0xA7);
184
185 val = sh_sir_crc_cnt(self);
186 if (4 != val) {
187 dev_err(dev, "CRC count error %x\n", val);
188 goto crc_init_out;
189 }
190
191 val = sh_sir_crc_out(self);
192 if (0x51DF != val) {
193 dev_err(dev, "CRC result error%x\n", val);
194 goto crc_init_out;
195 }
196
197 ret = 0;
198
199crc_init_out:
200
201 sh_sir_crc_reset(self);
202 return ret;
203}
204
205/************************************************************************
206
207
208 baud rate functions
209
210
211************************************************************************/
212#define SCLK_BASE 1843200 /* 1.8432MHz */
213
214static u32 sh_sir_find_sclk(struct clk *irda_clk)
215{
216 struct cpufreq_frequency_table *freq_table = irda_clk->freq_table;
217 struct clk *pclk = clk_get(NULL, "peripheral_clk");
218 u32 limit, min = 0xffffffff, tmp;
219 int i, index = 0;
220
221 limit = clk_get_rate(pclk);
222 clk_put(pclk);
223
224 /* IrDA can not set over peripheral_clk */
225 for (i = 0;
226 freq_table[i].frequency != CPUFREQ_TABLE_END;
227 i++) {
228 u32 freq = freq_table[i].frequency;
229
230 if (freq == CPUFREQ_ENTRY_INVALID)
231 continue;
232
233 /* IrDA should not over peripheral_clk */
234 if (freq > limit)
235 continue;
236
237 tmp = freq % SCLK_BASE;
238 if (tmp < min) {
239 min = tmp;
240 index = i;
241 }
242 }
243
244 return freq_table[index].frequency;
245}
246
247#define ERR_ROUNDING(a) ((a + 5000) / 10000)
248static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate)
249{
250 struct clk *clk;
251 struct device *dev = &self->ndev->dev;
252 u32 rate;
253 u16 uabca, uabc;
254 u16 irbca, irbc;
255 u32 min, rerr, tmp;
256 int i;
257
258 /* Baud Rate Error Correction x 10000 */
259 u32 rate_err_array[] = {
260 0000, 0625, 1250, 1875,
261 2500, 3125, 3750, 4375,
262 5000, 5625, 6250, 6875,
263 7500, 8125, 8750, 9375,
264 };
265
266 /*
267 * FIXME
268 *
269 * it support 9600 only now
270 */
271 switch (baudrate) {
272 case 9600:
273 break;
274 default:
275 dev_err(dev, "un-supported baudrate %d\n", baudrate);
276 return -EIO;
277 }
278
279 clk = clk_get(NULL, "irda_clk");
280 if (!clk) {
281 dev_err(dev, "can not get irda_clk\n");
282 return -EIO;
283 }
284
285 clk_set_rate(clk, sh_sir_find_sclk(clk));
286 rate = clk_get_rate(clk);
287 clk_put(clk);
288
289 dev_dbg(dev, "selected sclk = %d\n", rate);
290
291 /*
292 * CALCULATION
293 *
294 * 1843200 = system rate / (irbca + (irbc + 1))
295 */
296
297 irbc = rate / SCLK_BASE;
298
299 tmp = rate - (SCLK_BASE * irbc);
300 tmp *= 10000;
301
302 rerr = tmp / SCLK_BASE;
303
304 min = 0xffffffff;
305 irbca = 0;
306 for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
307 tmp = abs(rate_err_array[i] - rerr);
308 if (min > tmp) {
309 min = tmp;
310 irbca = i;
311 }
312 }
313
314 tmp = rate / (irbc + ERR_ROUNDING(rate_err_array[irbca]));
315 if ((SCLK_BASE / 100) < abs(tmp - SCLK_BASE))
316 dev_warn(dev, "IrDA freq error margin over %d\n", tmp);
317
318 dev_dbg(dev, "target = %d, result = %d, infrared = %d.%d\n",
319 SCLK_BASE, tmp, irbc, rate_err_array[irbca]);
320
321 irbca = (irbca & 0xF) << 4;
322 irbc = (irbc - 1) & 0xF;
323
324 if (!irbc) {
325 dev_err(dev, "sh_sir can not set 0 in IRIF_SIR2\n");
326 return -EIO;
327 }
328
329 sh_sir_write(self, IRIF_SIR0, IRTPW | IRERRC);
330 sh_sir_write(self, IRIF_SIR1, irbca);
331 sh_sir_write(self, IRIF_SIR2, irbc);
332
333 /*
334 * CALCULATION
335 *
336 * BaudRate[bps] = system rate / (uabca + (uabc + 1) x 16)
337 */
338
339 uabc = rate / baudrate;
340 uabc = (uabc / 16) - 1;
341 uabc = (uabc + 1) * 16;
342
343 tmp = rate - (uabc * baudrate);
344 tmp *= 10000;
345
346 rerr = tmp / baudrate;
347
348 min = 0xffffffff;
349 uabca = 0;
350 for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
351 tmp = abs(rate_err_array[i] - rerr);
352 if (min > tmp) {
353 min = tmp;
354 uabca = i;
355 }
356 }
357
358 tmp = rate / (uabc + ERR_ROUNDING(rate_err_array[uabca]));
359 if ((baudrate / 100) < abs(tmp - baudrate))
360 dev_warn(dev, "UART freq error margin over %d\n", tmp);
361
362 dev_dbg(dev, "target = %d, result = %d, uart = %d.%d\n",
363 baudrate, tmp,
364 uabc, rate_err_array[uabca]);
365
366 uabca = (uabca & 0xF) << 4;
367 uabc = (uabc / 16) - 1;
368
369 sh_sir_write(self, IRIF_UART6, uabca);
370 sh_sir_write(self, IRIF_UART7, uabc);
371
372 return 0;
373}
374
375/************************************************************************
376
377
378 iobuf function
379
380
381************************************************************************/
382static int __sh_sir_init_iobuf(iobuff_t *io, int size)
383{
384 io->head = kmalloc(size, GFP_KERNEL);
385 if (!io->head)
386 return -ENOMEM;
387
388 io->truesize = size;
389 io->in_frame = FALSE;
390 io->state = OUTSIDE_FRAME;
391 io->data = io->head;
392
393 return 0;
394}
395
396static void sh_sir_remove_iobuf(struct sh_sir_self *self)
397{
398 kfree(self->rx_buff.head);
399 kfree(self->tx_buff.head);
400
401 self->rx_buff.head = NULL;
402 self->tx_buff.head = NULL;
403}
404
405static int sh_sir_init_iobuf(struct sh_sir_self *self, int rxsize, int txsize)
406{
407 int err = -ENOMEM;
408
409 if (self->rx_buff.head ||
410 self->tx_buff.head) {
411 dev_err(&self->ndev->dev, "iobuff has already existed.");
412 return err;
413 }
414
415 err = __sh_sir_init_iobuf(&self->rx_buff, rxsize);
416 if (err)
417 goto iobuf_err;
418
419 err = __sh_sir_init_iobuf(&self->tx_buff, txsize);
420
421iobuf_err:
422 if (err)
423 sh_sir_remove_iobuf(self);
424
425 return err;
426}
427
428/************************************************************************
429
430
431 status function
432
433
434************************************************************************/
435static void sh_sir_clear_all_err(struct sh_sir_self *self)
436{
437 /* Clear error flag for receive pulse width */
438 sh_sir_update_bits(self, IRIF_SIR0, IRERRC, IRERRC);
439
440 /* Clear frame / EOF error flag */
441 sh_sir_write(self, IRIF_SIR_FLG, 0xffff);
442
443 /* Clear all status error */
444 sh_sir_write(self, IRIF_UART_STS2, 0);
445}
446
447static void sh_sir_set_phase(struct sh_sir_self *self, int phase)
448{
449 u16 uart5 = 0;
450 u16 uart0 = 0;
451
452 switch (phase) {
453 case TX_PHASE:
454 uart5 = TBEIM;
455 uart0 = TBEC | TIE;
456 break;
457 case TX_COMP_PHASE:
458 uart5 = TSBEIM;
459 uart0 = TIE;
460 break;
461 case RX_PHASE:
462 uart5 = RX_MASK;
463 uart0 = RIE;
464 break;
465 default:
466 break;
467 }
468
469 sh_sir_write(self, IRIF_UART5, uart5);
470 sh_sir_write(self, IRIF_UART0, uart0);
471}
472
473static int sh_sir_is_which_phase(struct sh_sir_self *self)
474{
475 u16 val = sh_sir_read(self, IRIF_UART5);
476
477 if (val & TBEIM)
478 return TX_PHASE;
479
480 if (val & TSBEIM)
481 return TX_COMP_PHASE;
482
483 if (val & RX_MASK)
484 return RX_PHASE;
485
486 return NONE_PHASE;
487}
488
489static void sh_sir_tx(struct sh_sir_self *self, int phase)
490{
491 switch (phase) {
492 case TX_PHASE:
493 if (0 >= self->tx_buff.len) {
494 sh_sir_set_phase(self, TX_COMP_PHASE);
495 } else {
496 sh_sir_write(self, IRIF_UART3, self->tx_buff.data[0]);
497 self->tx_buff.len--;
498 self->tx_buff.data++;
499 }
500 break;
501 case TX_COMP_PHASE:
502 sh_sir_set_phase(self, RX_PHASE);
503 netif_wake_queue(self->ndev);
504 break;
505 default:
506 dev_err(&self->ndev->dev, "should not happen\n");
507 break;
508 }
509}
510
511static int sh_sir_read_data(struct sh_sir_self *self)
512{
513 u16 val;
514 int timeout = 1024;
515
516 while (timeout--) {
517 val = sh_sir_read(self, IRIF_UART1);
518
519 /* data get */
520 if (val & RBF) {
521 if (val & (URSME | UROVE | URFRE | URPRE))
522 break;
523
524 return (int)sh_sir_read(self, IRIF_UART4);
525 }
526
527 udelay(1);
528 }
529
530 dev_err(&self->ndev->dev, "UART1 %04x : STATUS %04x\n",
531 val, sh_sir_read(self, IRIF_UART_STS2));
532
533 /* read data register for clear error */
534 sh_sir_read(self, IRIF_UART4);
535
536 return -1;
537}
538
539static void sh_sir_rx(struct sh_sir_self *self)
540{
541 int timeout = 1024;
542 int data;
543
544 while (timeout--) {
545 data = sh_sir_read_data(self);
546 if (data < 0)
547 break;
548
549 async_unwrap_char(self->ndev, &self->ndev->stats,
550 &self->rx_buff, (u8)data);
551 self->ndev->last_rx = jiffies;
552
553 if (EOFD & sh_sir_read(self, IRIF_SIR_FRM))
554 continue;
555
556 break;
557 }
558}
559
560static irqreturn_t sh_sir_irq(int irq, void *dev_id)
561{
562 struct sh_sir_self *self = dev_id;
563 struct device *dev = &self->ndev->dev;
564 int phase = sh_sir_is_which_phase(self);
565
566 switch (phase) {
567 case TX_COMP_PHASE:
568 case TX_PHASE:
569 sh_sir_tx(self, phase);
570 break;
571 case RX_PHASE:
572 if (sh_sir_read(self, IRIF_SIR3))
573 dev_err(dev, "rcv pulse width error occurred\n");
574
575 sh_sir_rx(self);
576 sh_sir_clear_all_err(self);
577 break;
578 default:
579 dev_err(dev, "unknown interrupt\n");
580 }
581
582 return IRQ_HANDLED;
583}
584
585/************************************************************************
586
587
588 net_device_ops function
589
590
591************************************************************************/
592static int sh_sir_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
593{
594 struct sh_sir_self *self = netdev_priv(ndev);
595 int speed = irda_get_next_speed(skb);
596
597 if ((0 < speed) &&
598 (9600 != speed)) {
599 dev_err(&ndev->dev, "support 9600 only (%d)\n", speed);
600 return -EIO;
601 }
602
603 netif_stop_queue(ndev);
604
605 self->tx_buff.data = self->tx_buff.head;
606 self->tx_buff.len = 0;
607 if (skb->len)
608 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
609 self->tx_buff.truesize);
610
611 sh_sir_set_phase(self, TX_PHASE);
612 dev_kfree_skb(skb);
613
614 return 0;
615}
616
/* ndo_do_ioctl: required by the IrDA framework, currently a no-op. */
static int sh_sir_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
{
	/*
	 * FIXME
	 *
	 * This function is needed for irda framework.
	 * But nothing to do now
	 */
	return 0;
}
627
628static struct net_device_stats *sh_sir_stats(struct net_device *ndev)
629{
630 struct sh_sir_self *self = netdev_priv(ndev);
631
632 return &self->ndev->stats;
633}
634
635static int sh_sir_open(struct net_device *ndev)
636{
637 struct sh_sir_self *self = netdev_priv(ndev);
638 int err;
639
640 clk_enable(self->clk);
641 err = sh_sir_crc_init(self);
642 if (err)
643 goto open_err;
644
645 sh_sir_set_baudrate(self, 9600);
646
647 self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
648 if (!self->irlap)
649 goto open_err;
650
651 /*
652 * Now enable the interrupt then start the queue
653 */
654 sh_sir_update_bits(self, IRIF_SIR_FRM, FRP, FRP);
655 sh_sir_read(self, IRIF_UART1); /* flag clear */
656 sh_sir_read(self, IRIF_UART4); /* flag clear */
657 sh_sir_set_phase(self, RX_PHASE);
658
659 netif_start_queue(ndev);
660
661 dev_info(&self->ndev->dev, "opened\n");
662
663 return 0;
664
665open_err:
666 clk_disable(self->clk);
667
668 return err;
669}
670
671static int sh_sir_stop(struct net_device *ndev)
672{
673 struct sh_sir_self *self = netdev_priv(ndev);
674
675 /* Stop IrLAP */
676 if (self->irlap) {
677 irlap_close(self->irlap);
678 self->irlap = NULL;
679 }
680
681 netif_stop_queue(ndev);
682
683 dev_info(&ndev->dev, "stoped\n");
684
685 return 0;
686}
687
/* netdev callbacks for the SIR-only IrDA interface */
static const struct net_device_ops sh_sir_ndo = {
	.ndo_open = sh_sir_open,
	.ndo_stop = sh_sir_stop,
	.ndo_start_xmit = sh_sir_hard_xmit,
	.ndo_do_ioctl = sh_sir_ioctl,
	.ndo_get_stats = sh_sir_stats,
};
695
696/************************************************************************
697
698
699 platform_driver function
700
701
702************************************************************************/
703static int __devinit sh_sir_probe(struct platform_device *pdev)
704{
705 struct net_device *ndev;
706 struct sh_sir_self *self;
707 struct resource *res;
708 char clk_name[8];
709 void __iomem *base;
710 unsigned int irq;
711 int err = -ENOMEM;
712
713 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
714 irq = platform_get_irq(pdev, 0);
715 if (!res || irq < 0) {
716 dev_err(&pdev->dev, "Not enough platform resources.\n");
717 goto exit;
718 }
719
720 ndev = alloc_irdadev(sizeof(*self));
721 if (!ndev)
722 goto exit;
723
724 base = ioremap_nocache(res->start, resource_size(res));
725 if (!base) {
726 err = -ENXIO;
727 dev_err(&pdev->dev, "Unable to ioremap.\n");
728 goto err_mem_1;
729 }
730
731 self = netdev_priv(ndev);
732 err = sh_sir_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
733 if (err)
734 goto err_mem_2;
735
736 snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id);
737 self->clk = clk_get(&pdev->dev, clk_name);
738 if (IS_ERR(self->clk)) {
739 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
740 goto err_mem_3;
741 }
742
743 irda_init_max_qos_capabilies(&self->qos);
744
745 ndev->netdev_ops = &sh_sir_ndo;
746 ndev->irq = irq;
747
748 self->membase = base;
749 self->ndev = ndev;
750 self->qos.baud_rate.bits &= IR_9600; /* FIXME */
751 self->qos.min_turn_time.bits = 1; /* 10 ms or more */
752
753 irda_qos_bits_to_value(&self->qos);
754
755 err = register_netdev(ndev);
756 if (err)
757 goto err_mem_4;
758
759 platform_set_drvdata(pdev, ndev);
760
761 if (request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self)) {
762 dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
763 goto err_mem_4;
764 }
765
766 dev_info(&pdev->dev, "SuperH IrDA probed\n");
767
768 goto exit;
769
770err_mem_4:
771 clk_put(self->clk);
772err_mem_3:
773 sh_sir_remove_iobuf(self);
774err_mem_2:
775 iounmap(self->membase);
776err_mem_1:
777 free_netdev(ndev);
778exit:
779 return err;
780}
781
782static int __devexit sh_sir_remove(struct platform_device *pdev)
783{
784 struct net_device *ndev = platform_get_drvdata(pdev);
785 struct sh_sir_self *self = netdev_priv(ndev);
786
787 if (!self)
788 return 0;
789
790 unregister_netdev(ndev);
791 clk_put(self->clk);
792 sh_sir_remove_iobuf(self);
793 iounmap(self->membase);
794 free_netdev(ndev);
795 platform_set_drvdata(pdev, NULL);
796
797 return 0;
798}
799
/* Platform driver glue; remove is wrapped for hotplug-less builds. */
static struct platform_driver sh_sir_driver = {
	.probe = sh_sir_probe,
	.remove = __devexit_p(sh_sir_remove),
	.driver = {
		.name = DRIVER_NAME,
	},
};
807
/* Module load: register the platform driver. */
static int __init sh_sir_init(void)
{
	return platform_driver_register(&sh_sir_driver);
}
812
/* Module unload: unregister the platform driver. */
static void __exit sh_sir_exit(void)
{
	platform_driver_unregister(&sh_sir_driver);
}
817
818module_init(sh_sir_init);
819module_exit(sh_sir_exit);
820
821MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
822MODULE_DESCRIPTION("SuperH IrDA driver");
823MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index fddb4efd5453..6533c010cf5c 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -121,7 +121,7 @@ static void iodelay(int udelay)
121 } 121 }
122} 122}
123 123
124static struct pci_device_id via_pci_tbl[] = { 124static DEFINE_PCI_DEVICE_TABLE(via_pci_tbl) = {
125 { PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 }, 125 { PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
126 { PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 }, 126 { PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
127 { PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 }, 127 { PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index bd3c6b5ee76a..209d4bcfaced 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -59,7 +59,7 @@ MODULE_LICENSE("GPL");
59 59
60static /* const */ char drivername[] = DRIVER_NAME; 60static /* const */ char drivername[] = DRIVER_NAME;
61 61
62static struct pci_device_id vlsi_irda_table [] = { 62static DEFINE_PCI_DEVICE_TABLE(vlsi_irda_table) = {
63 { 63 {
64 .class = PCI_CLASS_WIRELESS_IRDA << 8, 64 .class = PCI_CLASS_WIRELESS_IRDA << 8,
65 .class_mask = PCI_CLASS_SUBCLASS_MASK << 8, 65 .class_mask = PCI_CLASS_SUBCLASS_MASK << 8,
diff --git a/drivers/net/isa-skeleton.c b/drivers/net/isa-skeleton.c
deleted file mode 100644
index 04d0502726c0..000000000000
--- a/drivers/net/isa-skeleton.c
+++ /dev/null
@@ -1,718 +0,0 @@
1/* isa-skeleton.c: A network driver outline for linux.
2 *
3 * Written 1993-94 by Donald Becker.
4 *
5 * Copyright 1993 United States Government as represented by the
6 * Director, National Security Agency.
7 *
8 * This software may be used and distributed according to the terms
9 * of the GNU General Public License, incorporated herein by reference.
10 *
11 * The author may be reached as becker@scyld.com, or C/O
12 * Scyld Computing Corporation
13 * 410 Severn Ave., Suite 210
14 * Annapolis MD 21403
15 *
 * This file is an outline for writing a network device driver for
 * the Linux operating system.
18 *
19 * To write (or understand) a driver, have a look at the "loopback.c" file to
20 * get a feel of what is going on, and then use the code below as a skeleton
21 * for the new driver.
22 *
23 */
24
25static const char *version =
26 "isa-skeleton.c:v1.51 9/24/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
27
28/*
29 * Sources:
30 * List your sources of programming information to document that
31 * the driver is your own creation, and give due credit to others
32 * that contributed to the work. Remember that GNU project code
33 * cannot use proprietary or trade secret information. Interface
34 * definitions are generally considered non-copyrightable to the
35 * extent that the same names and structures must be used to be
36 * compatible.
37 *
 * Finally, keep in mind that the Linux kernel has an API, not an
 * ABI. Proprietary object-code-only distributions are not permitted
 * under the GPL.
41 */
42
43#include <linux/module.h>
44#include <linux/kernel.h>
45#include <linux/types.h>
46#include <linux/fcntl.h>
47#include <linux/interrupt.h>
48#include <linux/ioport.h>
49#include <linux/in.h>
50#include <linux/slab.h>
51#include <linux/string.h>
52#include <linux/spinlock.h>
53#include <linux/errno.h>
54#include <linux/init.h>
55#include <linux/netdevice.h>
56#include <linux/etherdevice.h>
57#include <linux/skbuff.h>
58#include <linux/bitops.h>
59
60#include <asm/system.h>
61#include <asm/io.h>
62#include <asm/dma.h>
63
64/*
65 * The name of the card. Is used for messages and in the requests for
66 * io regions, irqs and dma channels
67 */
68static const char* cardname = "netcard";
69
70/* First, a few definitions that the brave might change. */
71
72/* A zero-terminated list of I/O addresses to be probed. */
73static unsigned int netcard_portlist[] __initdata =
74 { 0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0};
75
76/* use 0 for production, 1 for verification, >2 for debug */
77#ifndef NET_DEBUG
78#define NET_DEBUG 2
79#endif
80static unsigned int net_debug = NET_DEBUG;
81
82/* The number of low I/O ports used by the ethercard. */
83#define NETCARD_IO_EXTENT 32
84
85#define MY_TX_TIMEOUT ((400*HZ)/1000)
86
/* Information that need to be kept for each board. */
struct net_local {
	struct net_device_stats stats;	/* software-maintained counters */
	long open_time;			/* Useless example local info. */

	/* Tx control lock. This protects the transmit buffer ring
	 * state along with the "tx full" state of the driver. This
	 * means all netif_queue flow control actions are protected
	 * by this lock as well.
	 */
	spinlock_t lock;
};
99
100/* The station (ethernet) address prefix, used for IDing the board. */
101#define SA_ADDR0 0x00
102#define SA_ADDR1 0x42
103#define SA_ADDR2 0x65
104
105/* Index to functions, as function prototypes. */
106
107static int netcard_probe1(struct net_device *dev, int ioaddr);
108static int net_open(struct net_device *dev);
109static int net_send_packet(struct sk_buff *skb, struct net_device *dev);
110static irqreturn_t net_interrupt(int irq, void *dev_id);
111static void net_rx(struct net_device *dev);
112static int net_close(struct net_device *dev);
113static struct net_device_stats *net_get_stats(struct net_device *dev);
114static void set_multicast_list(struct net_device *dev);
115static void net_tx_timeout(struct net_device *dev);
116
117
118/* Example routines you must write ;->. */
119#define tx_done(dev) 1
120static void hardware_send_packet(short ioaddr, char *buf, int length);
121static void chipset_init(struct net_device *dev, int startp);
122
/*
 * Check for a network adaptor of this type, and return '0' iff one exists.
 * If dev->base_addr == 0, probe all likely locations.
 * If dev->base_addr == 1, always return failure.
 * If dev->base_addr == 2, allocate space for the device and return success
 * (detachable devices only).
 */
static int __init do_netcard_probe(struct net_device *dev)
{
	int i;
	int base_addr = dev->base_addr;
	int irq = dev->irq;	/* remember user-specified IRQ; a probe may clobber it */

	if (base_addr > 0x1ff)	/* Check a single specified location. */
		return netcard_probe1(dev, base_addr);
	else if (base_addr != 0)	/* Don't probe at all. */
		return -ENXIO;

	for (i = 0; netcard_portlist[i]; i++) {
		int ioaddr = netcard_portlist[i];
		if (netcard_probe1(dev, ioaddr) == 0)
			return 0;
		dev->irq = irq;	/* restore the IRQ between failed probes */
	}

	return -ENODEV;
}
150
/*
 * Undo everything netcard_probe1() acquired: optional jumpered DMA
 * channel, optional jumpered IRQ, and the I/O region.
 */
static void cleanup_card(struct net_device *dev)
{
#ifdef jumpered_dma
	free_dma(dev->dma);
#endif
#ifdef jumpered_interrupts
	free_irq(dev->irq, dev);
#endif
	release_region(dev->base_addr, NETCARD_IO_EXTENT);
}
161
#ifndef MODULE
/*
 * Built-in entry point: allocate an etherdev, apply any boot-time
 * "netdev=" settings for this unit, then run the probe.  Returns the
 * probed device or an ERR_PTR on failure.
 */
struct net_device * __init netcard_probe(int unit)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
	int err;

	if (!dev)
		return ERR_PTR(-ENOMEM);

	sprintf(dev->name, "eth%d", unit);
	netdev_boot_setup_check(dev);

	err = do_netcard_probe(dev);
	if (err)
		goto out;
	return dev;
out:
	free_netdev(dev);
	return ERR_PTR(err);
}
#endif
183
/* netdev callbacks; generic eth_* helpers cover address handling. */
static const struct net_device_ops netcard_netdev_ops = {
	.ndo_open = net_open,
	.ndo_stop = net_close,
	.ndo_start_xmit = net_send_packet,
	.ndo_get_stats = net_get_stats,
	.ndo_set_multicast_list = set_multicast_list,
	.ndo_tx_timeout = net_tx_timeout,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_change_mtu = eth_change_mtu,
};
195
196/*
197 * This is the real probe routine. Linux has a history of friendly device
198 * probes on the ISA bus. A good device probes avoids doing writes, and
199 * verifies that the correct device exists and functions.
200 */
201static int __init netcard_probe1(struct net_device *dev, int ioaddr)
202{
203 struct net_local *np;
204 static unsigned version_printed;
205 int i;
206 int err = -ENODEV;
207
208 /* Grab the region so that no one else tries to probe our ioports. */
209 if (!request_region(ioaddr, NETCARD_IO_EXTENT, cardname))
210 return -EBUSY;
211
212 /*
213 * For ethernet adaptors the first three octets of the station address
214 * contains the manufacturer's unique code. That might be a good probe
215 * method. Ideally you would add additional checks.
216 */
217 if (inb(ioaddr + 0) != SA_ADDR0 ||
218 inb(ioaddr + 1) != SA_ADDR1 ||
219 inb(ioaddr + 2) != SA_ADDR2)
220 goto out;
221
222 if (net_debug && version_printed++ == 0)
223 printk(KERN_DEBUG "%s", version);
224
225 printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cardname, ioaddr);
226
227 /* Fill in the 'dev' fields. */
228 dev->base_addr = ioaddr;
229
230 /* Retrieve and print the ethernet address. */
231 for (i = 0; i < 6; i++)
232 dev->dev_addr[i] = inb(ioaddr + i);
233
234 printk("%pM", dev->dev_addr);
235
236 err = -EAGAIN;
237#ifdef jumpered_interrupts
238 /*
239 * If this board has jumpered interrupts, allocate the interrupt
240 * vector now. There is no point in waiting since no other device
241 * can use the interrupt, and this marks the irq as busy. Jumpered
242 * interrupts are typically not reported by the boards, and we must
243 * used autoIRQ to find them.
244 */
245
246 if (dev->irq == -1)
247 ; /* Do nothing: a user-level program will set it. */
248 else if (dev->irq < 2) { /* "Auto-IRQ" */
249 unsigned long irq_mask = probe_irq_on();
250 /* Trigger an interrupt here. */
251
252 dev->irq = probe_irq_off(irq_mask);
253 if (net_debug >= 2)
254 printk(" autoirq is %d", dev->irq);
255 } else if (dev->irq == 2)
256 /*
257 * Fixup for users that don't know that IRQ 2 is really
258 * IRQ9, or don't know which one to set.
259 */
260 dev->irq = 9;
261
262 {
263 int irqval = request_irq(dev->irq, net_interrupt, 0, cardname, dev);
264 if (irqval) {
265 printk("%s: unable to get IRQ %d (irqval=%d).\n",
266 dev->name, dev->irq, irqval);
267 goto out;
268 }
269 }
270#endif /* jumpered interrupt */
271#ifdef jumpered_dma
272 /*
273 * If we use a jumpered DMA channel, that should be probed for and
274 * allocated here as well. See lance.c for an example.
275 */
276 if (dev->dma == 0) {
277 if (request_dma(dev->dma, cardname)) {
278 printk("DMA %d allocation failed.\n", dev->dma);
279 goto out1;
280 } else
281 printk(", assigned DMA %d.\n", dev->dma);
282 } else {
283 short dma_status, new_dma_status;
284
285 /* Read the DMA channel status registers. */
286 dma_status = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
287 (inb(DMA2_STAT_REG) & 0xf0);
288 /* Trigger a DMA request, perhaps pause a bit. */
289 outw(0x1234, ioaddr + 8);
290 /* Re-read the DMA status registers. */
291 new_dma_status = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
292 (inb(DMA2_STAT_REG) & 0xf0);
293 /*
294 * Eliminate the old and floating requests,
295 * and DMA4 the cascade.
296 */
297 new_dma_status ^= dma_status;
298 new_dma_status &= ~0x10;
299 for (i = 7; i > 0; i--)
300 if (test_bit(i, &new_dma_status)) {
301 dev->dma = i;
302 break;
303 }
304 if (i <= 0) {
305 printk("DMA probe failed.\n");
306 goto out1;
307 }
308 if (request_dma(dev->dma, cardname)) {
309 printk("probed DMA %d allocation failed.\n", dev->dma);
310 goto out1;
311 }
312 }
313#endif /* jumpered DMA */
314
315 np = netdev_priv(dev);
316 spin_lock_init(&np->lock);
317
318 dev->netdev_ops = &netcard_netdev_ops;
319 dev->watchdog_timeo = MY_TX_TIMEOUT;
320
321 err = register_netdev(dev);
322 if (err)
323 goto out2;
324 return 0;
325out2:
326#ifdef jumpered_dma
327 free_dma(dev->dma);
328#endif
329out1:
330#ifdef jumpered_interrupts
331 free_irq(dev->irq, dev);
332#endif
333out:
334 release_region(base_addr, NETCARD_IO_EXTENT);
335 return err;
336}
337
/*
 * Watchdog callback: the queueing layer decided a transmit hung.
 * Restart the adaptor and, if there is room again, let transmits resume.
 */
static void net_tx_timeout(struct net_device *dev)
{
	struct net_local *np = netdev_priv(dev);

	printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
	       tx_done(dev) ? "IRQ conflict" : "network cable problem");

	/* Try to restart the adaptor. */
	chipset_init(dev, 1);

	np->stats.tx_errors++;

	/* If we have space available to accept new transmit
	 * requests, wake up the queueing layer. This would
	 * be the case if the chipset_init() call above just
	 * flushes out the tx queue and empties it.
	 *
	 * If instead, the tx queue is retained then the
	 * netif_wake_queue() call should be placed in the
	 * TX completion interrupt handler of the driver instead
	 * of here.
	 */
	if (!tx_full(dev))
		netif_wake_queue(dev);
}
363
/*
 * Open/initialize the board. This is called (in the current kernel)
 * sometime after booting when the 'ifconfig' program is run.
 *
 * This routine should set everything up anew at each open, even
 * registers that "should" only need to be set once at boot, so that
 * there is a non-reboot way to recover if something goes wrong.
 */
static int
net_open(struct net_device *dev)
{
	struct net_local *np = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	/*
	 * This is used if the interrupt line can be turned off (shared).
	 * See 3c503.c for an example of selecting the IRQ at config-time.
	 */
	if (request_irq(dev->irq, net_interrupt, 0, cardname, dev)) {
		return -EAGAIN;
	}
	/*
	 * Always allocate the DMA channel after the IRQ,
	 * and clean up on failure.
	 */
	if (request_dma(dev->dma, cardname)) {
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Reset the hardware here. Don't forget to set the station address. */
	chipset_init(dev, 1);
	outb(0x00, ioaddr);
	np->open_time = jiffies;

	/* We are now ready to accept transmit requests from
	 * the queueing layer of the networking.
	 */
	netif_start_queue(dev);

	return 0;
}
405
/* This will only be invoked if your driver is _not_ in XOFF state.
 * What this means is that you need not check it, and that this
 * invariant will hold if you make sure that the netif_*_queue()
 * calls are done at the proper times.
 */
static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *np = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	/* pad short frames up to the ethernet minimum length */
	short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
	unsigned char *buf = skb->data;

	/* If some error occurs while trying to transmit this
	 * packet, you should return '1' from this function.
	 * In such a case you _may not_ do anything to the
	 * SKB, it is still owned by the network queueing
	 * layer when an error is returned. This means you
	 * may not modify any SKB fields, you may not free
	 * the SKB, etc.
	 */

#if TX_RING
	/* This is the most common case for modern hardware.
	 * The spinlock protects this code from the TX complete
	 * hardware interrupt handler. Queue flow control is
	 * thus managed under this lock as well.
	 */
	unsigned long flags;
	spin_lock_irqsave(&np->lock, flags);

	add_to_tx_ring(np, skb, length);
	dev->trans_start = jiffies;

	/* If we just used up the very last entry in the
	 * TX ring on this device, tell the queueing
	 * layer to send no more.
	 */
	if (tx_full(dev))
		netif_stop_queue(dev);

	/* When the TX completion hw interrupt arrives, this
	 * is when the transmit statistics are updated.
	 */

	spin_unlock_irqrestore(&np->lock, flags);
#else
	/* This is the case for older hardware which takes
	 * a single transmit buffer at a time, and it is
	 * just written to the device via PIO.
	 *
	 * No spin locking is needed since there is no TX complete
	 * event. If by chance your card does have a TX complete
	 * hardware IRQ then you may need to utilize np->lock here.
	 */
	hardware_send_packet(ioaddr, buf, length);
	np->stats.tx_bytes += skb->len;

	dev->trans_start = jiffies;

	/* You might need to clean up and record Tx statistics here. */
	/* NOTE(review): 81 is a device-specific "abort" status value */
	if (inw(ioaddr) == /*RU*/81)
		np->stats.tx_aborted_errors++;
	dev_kfree_skb (skb);
#endif

	return NETDEV_TX_OK;
}
473
#if TX_RING
/* This handles TX complete events posted by the device
 * via interrupts.
 */
void net_tx(struct net_device *dev)
{
	struct net_local *np = netdev_priv(dev);
	int entry;

	/* This protects us from concurrent execution of
	 * our dev->hard_start_xmit function above.
	 */
	spin_lock(&np->lock);

	/* reap every ring entry the hardware has finished with */
	entry = np->tx_old;
	while (tx_entry_is_sent(np, entry)) {
		struct sk_buff *skb = np->skbs[entry];

		np->stats.tx_bytes += skb->len;
		dev_kfree_skb_irq (skb);

		entry = next_tx_entry(np, entry);
	}
	np->tx_old = entry;

	/* If we had stopped the queue due to a "tx full"
	 * condition, and space has now been made available,
	 * wake up the queue.
	 */
	if (netif_queue_stopped(dev) && ! tx_full(dev))
		netif_wake_queue(dev);

	spin_unlock(&np->lock);
}
#endif
509
/*
 * The typical workload of the driver:
 * Handle the network interface interrupts.
 */
static irqreturn_t net_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *np;
	int ioaddr, status;
	int handled = 0;	/* reported to the IRQ core; 0 = not ours */

	ioaddr = dev->base_addr;

	np = netdev_priv(dev);
	status = inw(ioaddr + 0);

	if (status == 0)
		goto out;	/* not our interrupt (shared line) */
	handled = 1;

	if (status & RX_INTR) {
		/* Got a packet(s). */
		net_rx(dev);
	}
#if TX_RING
	if (status & TX_INTR) {
		/* Transmit complete. */
		net_tx(dev);
		np->stats.tx_packets++;
		netif_wake_queue(dev);
	}
#endif
	if (status & COUNTERS_INTR) {
		/* Increment the appropriate 'localstats' field. */
		np->stats.tx_window_errors++;
	}
out:
	return IRQ_RETVAL(handled);
}
549
550/* We have a good packet(s), get it/them out of the buffers. */
551static void
552net_rx(struct net_device *dev)
553{
554 struct net_local *lp = netdev_priv(dev);
555 int ioaddr = dev->base_addr;
556 int boguscount = 10;
557
558 do {
559 int status = inw(ioaddr);
560 int pkt_len = inw(ioaddr);
561
562 if (pkt_len == 0) /* Read all the frames? */
563 break; /* Done for now */
564
565 if (status & 0x40) { /* There was an error. */
566 lp->stats.rx_errors++;
567 if (status & 0x20) lp->stats.rx_frame_errors++;
568 if (status & 0x10) lp->stats.rx_over_errors++;
569 if (status & 0x08) lp->stats.rx_crc_errors++;
570 if (status & 0x04) lp->stats.rx_fifo_errors++;
571 } else {
572 /* Malloc up new buffer. */
573 struct sk_buff *skb;
574
575 lp->stats.rx_bytes+=pkt_len;
576
577 skb = dev_alloc_skb(pkt_len);
578 if (skb == NULL) {
579 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
580 dev->name);
581 lp->stats.rx_dropped++;
582 break;
583 }
584 skb->dev = dev;
585
586 /* 'skb->data' points to the start of sk_buff data area. */
587 memcpy(skb_put(skb,pkt_len), (void*)dev->rmem_start,
588 pkt_len);
589 /* or */
590 insw(ioaddr, skb->data, (pkt_len + 1) >> 1);
591
592 netif_rx(skb);
593 lp->stats.rx_packets++;
594 lp->stats.rx_bytes += pkt_len;
595 }
596 } while (--boguscount);
597
598 return;
599}
600
/* The inverse routine to net_open(): quiesce the hardware and release
 * the IRQ and DMA channel acquired at open time.
 */
static int
net_close(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	lp->open_time = 0;

	netif_stop_queue(dev);

	/* Flush the Tx and disable Rx here. */

	disable_dma(dev->dma);

	/* If not IRQ or DMA jumpered, free up the line. */
	outw(0x00, ioaddr+0); /* Release the physical interrupt line. */

	free_irq(dev->irq, dev);
	free_dma(dev->dma);

	/* Update the statistics here. */

	return 0;

}
627
/*
 * Get the current statistics.
 * This may be called with the card open or closed.
 */
static struct net_device_stats *net_get_stats(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	short ioaddr = dev->base_addr;

	/* Update the statistics from the device registers. */
	lp->stats.rx_missed_errors = inw(ioaddr+1);	/* hardware-kept missed count */
	return &lp->stats;
}
641
/*
 * Set or clear the multicast filter for this adaptor.
 * num_addrs == -1 Promiscuous mode, receive all packets
 * num_addrs == 0 Normal mode, clear multicast list
 * num_addrs > 0 Multicast mode, receive normal and MC packets,
 * and do best-effort filtering.
 *
 * NOTE(review): MULTICAST/PROMISC/HW_MAX_ADDRS and
 * hardware_set_filter() are example placeholders a real driver
 * must provide.
 */
static void
set_multicast_list(struct net_device *dev)
{
	short ioaddr = dev->base_addr;
	if (dev->flags&IFF_PROMISC)
	{
		/* Enable promiscuous mode */
		outw(MULTICAST|PROMISC, ioaddr);
	}
	else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS)
	{
		/* Too many addresses for a perfect filter:
		 * disable promiscuous mode, use normal mode. */
		hardware_set_filter(NULL);

		outw(MULTICAST, ioaddr);
	}
	else if(dev->mc_count)
	{
		/* Walk the address list, and load the filter */
		hardware_set_filter(dev->mc_list);

		outw(MULTICAST, ioaddr);
	}
	else
		outw(0, ioaddr);	/* no multicast: plain unicast + broadcast */
}
675
#ifdef MODULE

static struct net_device *this_device;	/* single supported card instance */
static int io = 0x300;	/* default probe address; 0 would mean auto-probe */
static int irq;
static int dma;
static int mem;
MODULE_LICENSE("GPL");

/*
 * Module entry point: build a netdev from the insmod parameters and
 * run the probe.  Only a single card instance is supported.
 */
int init_module(void)
{
	struct net_device *dev;
	int result;

	if (io == 0)
		printk(KERN_WARNING "%s: You shouldn't use auto-probing with insmod!\n",
		       cardname);
	dev = alloc_etherdev(sizeof(struct net_local));
	if (!dev)
		return -ENOMEM;

	/* Copy the parameters from insmod into the device structure. */
	dev->base_addr = io;
	dev->irq = irq;
	dev->dma = dma;
	dev->mem_start = mem;
	if (do_netcard_probe(dev) == 0) {
		this_device = dev;
		return 0;
	}
	free_netdev(dev);
	return -ENXIO;
}

/*
 * Module exit: unregister the single device and release its resources.
 */
void
cleanup_module(void)
{
	unregister_netdev(this_device);
	cleanup_card(this_device);
	free_netdev(this_device);
}

#endif /* MODULE */
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 16c91910d6c1..966de5d69521 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -958,18 +958,17 @@ static void veth_set_multicast_list(struct net_device *dev)
958 write_lock_irqsave(&port->mcast_gate, flags); 958 write_lock_irqsave(&port->mcast_gate, flags);
959 959
960 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) || 960 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
961 (dev->mc_count > VETH_MAX_MCAST)) { 961 (netdev_mc_count(dev) > VETH_MAX_MCAST)) {
962 port->promiscuous = 1; 962 port->promiscuous = 1;
963 } else { 963 } else {
964 struct dev_mc_list *dmi = dev->mc_list; 964 struct dev_mc_list *dmi;
965 int i;
966 965
967 port->promiscuous = 0; 966 port->promiscuous = 0;
968 967
969 /* Update table */ 968 /* Update table */
970 port->num_mcast = 0; 969 port->num_mcast = 0;
971 970
972 for (i = 0; i < dev->mc_count; i++) { 971 netdev_for_each_mc_addr(dmi, dev) {
973 u8 *addr = dmi->dmi_addr; 972 u8 *addr = dmi->dmi_addr;
974 u64 xaddr = 0; 973 u64 xaddr = 0;
975 974
@@ -978,7 +977,6 @@ static void veth_set_multicast_list(struct net_device *dev)
978 port->mcast_addr[port->num_mcast] = xaddr; 977 port->mcast_addr[port->num_mcast] = xaddr;
979 port->num_mcast++; 978 port->num_mcast++;
980 } 979 }
981 dmi = dmi->next;
982 } 980 }
983 } 981 }
984 982
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 5257ae08b9f9..92d2e71d0c8b 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -75,19 +75,14 @@ struct ixgb_adapter;
75#include "ixgb_ee.h" 75#include "ixgb_ee.h"
76#include "ixgb_ids.h" 76#include "ixgb_ids.h"
77 77
78#define PFX "ixgb: "
79
78#ifdef _DEBUG_DRIVER_ 80#ifdef _DEBUG_DRIVER_
79#define IXGB_DBG(args...) printk(KERN_DEBUG "ixgb: " args) 81#define IXGB_DBG(args...) printk(KERN_DEBUG PFX args)
80#else 82#else
81#define IXGB_DBG(args...) 83#define IXGB_DBG(args...)
82#endif 84#endif
83 85
84#define PFX "ixgb: "
85#define DPRINTK(nlevel, klevel, fmt, args...) \
86 (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
87 printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
88 __func__ , ## args))
89
90
91/* TX/RX descriptor defines */ 86/* TX/RX descriptor defines */
92#define DEFAULT_TXD 256 87#define DEFAULT_TXD 256
93#define MAX_TXD 4096 88#define MAX_TXD 4096
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 593d1a4f217c..c9fef65cb98b 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -50,7 +50,7 @@ MODULE_PARM_DESC(copybreak,
50 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 50 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
51 * Class, Class Mask, private data (not used) } 51 * Class, Class Mask, private data (not used) }
52 */ 52 */
53static struct pci_device_id ixgb_pci_tbl[] = { 53static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = {
54 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX, 54 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
55 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 55 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
56 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4, 56 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
@@ -238,8 +238,8 @@ ixgb_up(struct ixgb_adapter *adapter)
238 if (err) { 238 if (err) {
239 if (adapter->have_msi) 239 if (adapter->have_msi)
240 pci_disable_msi(adapter->pdev); 240 pci_disable_msi(adapter->pdev);
241 DPRINTK(PROBE, ERR, 241 netif_err(adapter, probe, adapter->netdev,
242 "Unable to allocate interrupt Error: %d\n", err); 242 "Unable to allocate interrupt Error: %d\n", err);
243 return err; 243 return err;
244 } 244 }
245 245
@@ -310,7 +310,7 @@ ixgb_reset(struct ixgb_adapter *adapter)
310 310
311 ixgb_adapter_stop(hw); 311 ixgb_adapter_stop(hw);
312 if (!ixgb_init_hw(hw)) 312 if (!ixgb_init_hw(hw))
313 DPRINTK(PROBE, ERR, "ixgb_init_hw failed.\n"); 313 netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");
314 314
315 /* restore frame size information */ 315 /* restore frame size information */
316 IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT); 316 IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
@@ -447,7 +447,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
447 /* make sure the EEPROM is good */ 447 /* make sure the EEPROM is good */
448 448
449 if (!ixgb_validate_eeprom_checksum(&adapter->hw)) { 449 if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
450 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); 450 netif_err(adapter, probe, adapter->netdev,
451 "The EEPROM Checksum Is Not Valid\n");
451 err = -EIO; 452 err = -EIO;
452 goto err_eeprom; 453 goto err_eeprom;
453 } 454 }
@@ -456,7 +457,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
456 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); 457 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
457 458
458 if (!is_valid_ether_addr(netdev->perm_addr)) { 459 if (!is_valid_ether_addr(netdev->perm_addr)) {
459 DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); 460 netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
460 err = -EIO; 461 err = -EIO;
461 goto err_eeprom; 462 goto err_eeprom;
462 } 463 }
@@ -477,7 +478,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
477 /* carrier off reporting is important to ethtool even BEFORE open */ 478 /* carrier off reporting is important to ethtool even BEFORE open */
478 netif_carrier_off(netdev); 479 netif_carrier_off(netdev);
479 480
480 DPRINTK(PROBE, INFO, "Intel(R) PRO/10GbE Network Connection\n"); 481 netif_info(adapter, probe, adapter->netdev,
482 "Intel(R) PRO/10GbE Network Connection\n");
481 ixgb_check_options(adapter); 483 ixgb_check_options(adapter);
482 /* reset the hardware with the new settings */ 484 /* reset the hardware with the new settings */
483 485
@@ -552,14 +554,14 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
552 hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; 554 hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
553 adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */ 555 adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */
554 556
555 if ((hw->device_id == IXGB_DEVICE_ID_82597EX) 557 if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
556 || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) 558 (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
557 || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) 559 (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
558 || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR)) 560 (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
559 hw->mac_type = ixgb_82597; 561 hw->mac_type = ixgb_82597;
560 else { 562 else {
561 /* should never have loaded on this device */ 563 /* should never have loaded on this device */
562 DPRINTK(PROBE, ERR, "unsupported device id\n"); 564 netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
563 } 565 }
564 566
565 /* enable flow control to be programmed */ 567 /* enable flow control to be programmed */
@@ -661,8 +663,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
661 size = sizeof(struct ixgb_buffer) * txdr->count; 663 size = sizeof(struct ixgb_buffer) * txdr->count;
662 txdr->buffer_info = vmalloc(size); 664 txdr->buffer_info = vmalloc(size);
663 if (!txdr->buffer_info) { 665 if (!txdr->buffer_info) {
664 DPRINTK(PROBE, ERR, 666 netif_err(adapter, probe, adapter->netdev,
665 "Unable to allocate transmit descriptor ring memory\n"); 667 "Unable to allocate transmit descriptor ring memory\n");
666 return -ENOMEM; 668 return -ENOMEM;
667 } 669 }
668 memset(txdr->buffer_info, 0, size); 670 memset(txdr->buffer_info, 0, size);
@@ -675,8 +677,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
675 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 677 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
676 if (!txdr->desc) { 678 if (!txdr->desc) {
677 vfree(txdr->buffer_info); 679 vfree(txdr->buffer_info);
678 DPRINTK(PROBE, ERR, 680 netif_err(adapter, probe, adapter->netdev,
679 "Unable to allocate transmit descriptor memory\n"); 681 "Unable to allocate transmit descriptor memory\n");
680 return -ENOMEM; 682 return -ENOMEM;
681 } 683 }
682 memset(txdr->desc, 0, txdr->size); 684 memset(txdr->desc, 0, txdr->size);
@@ -750,8 +752,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
750 size = sizeof(struct ixgb_buffer) * rxdr->count; 752 size = sizeof(struct ixgb_buffer) * rxdr->count;
751 rxdr->buffer_info = vmalloc(size); 753 rxdr->buffer_info = vmalloc(size);
752 if (!rxdr->buffer_info) { 754 if (!rxdr->buffer_info) {
753 DPRINTK(PROBE, ERR, 755 netif_err(adapter, probe, adapter->netdev,
754 "Unable to allocate receive descriptor ring\n"); 756 "Unable to allocate receive descriptor ring\n");
755 return -ENOMEM; 757 return -ENOMEM;
756 } 758 }
757 memset(rxdr->buffer_info, 0, size); 759 memset(rxdr->buffer_info, 0, size);
@@ -765,8 +767,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
765 767
766 if (!rxdr->desc) { 768 if (!rxdr->desc) {
767 vfree(rxdr->buffer_info); 769 vfree(rxdr->buffer_info);
768 DPRINTK(PROBE, ERR, 770 netif_err(adapter, probe, adapter->netdev,
769 "Unable to allocate receive descriptors\n"); 771 "Unable to allocate receive descriptors\n");
770 return -ENOMEM; 772 return -ENOMEM;
771 } 773 }
772 memset(rxdr->desc, 0, rxdr->size); 774 memset(rxdr->desc, 0, rxdr->size);
@@ -1077,7 +1079,7 @@ ixgb_set_multi(struct net_device *netdev)
1077 rctl |= IXGB_RCTL_VFE; 1079 rctl |= IXGB_RCTL_VFE;
1078 } 1080 }
1079 1081
1080 if (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) { 1082 if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
1081 rctl |= IXGB_RCTL_MPE; 1083 rctl |= IXGB_RCTL_MPE;
1082 IXGB_WRITE_REG(hw, RCTL, rctl); 1084 IXGB_WRITE_REG(hw, RCTL, rctl);
1083 } else { 1085 } else {
@@ -1086,13 +1088,12 @@ ixgb_set_multi(struct net_device *netdev)
1086 1088
1087 IXGB_WRITE_REG(hw, RCTL, rctl); 1089 IXGB_WRITE_REG(hw, RCTL, rctl);
1088 1090
1089 for (i = 0, mc_ptr = netdev->mc_list; 1091 i = 0;
1090 mc_ptr; 1092 netdev_for_each_mc_addr(mc_ptr, netdev)
1091 i++, mc_ptr = mc_ptr->next) 1093 memcpy(&mta[i++ * IXGB_ETH_LENGTH_OF_ADDRESS],
1092 memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
1093 mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS); 1094 mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
1094 1095
1095 ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0); 1096 ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
1096 } 1097 }
1097} 1098}
1098 1099
@@ -1580,7 +1581,8 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1580 /* MTU < 68 is an error for IPv4 traffic, just don't allow it */ 1581 /* MTU < 68 is an error for IPv4 traffic, just don't allow it */
1581 if ((new_mtu < 68) || 1582 if ((new_mtu < 68) ||
1582 (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) { 1583 (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
1583 DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu); 1584 netif_err(adapter, probe, adapter->netdev,
1585 "Invalid MTU setting %d\n", new_mtu);
1584 return -EINVAL; 1586 return -EINVAL;
1585 } 1587 }
1586 1588
@@ -1616,7 +1618,7 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
1616 return; 1618 return;
1617 1619
1618 if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) || 1620 if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
1619 (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) { 1621 (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
1620 u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL); 1622 u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
1621 u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL); 1623 u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
1622 u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH); 1624 u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
@@ -1854,24 +1856,25 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1854 && !(IXGB_READ_REG(&adapter->hw, STATUS) & 1856 && !(IXGB_READ_REG(&adapter->hw, STATUS) &
1855 IXGB_STATUS_TXOFF)) { 1857 IXGB_STATUS_TXOFF)) {
1856 /* detected Tx unit hang */ 1858 /* detected Tx unit hang */
1857 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" 1859 netif_err(adapter, drv, adapter->netdev,
1858 " TDH <%x>\n" 1860 "Detected Tx Unit Hang\n"
1859 " TDT <%x>\n" 1861 " TDH <%x>\n"
1860 " next_to_use <%x>\n" 1862 " TDT <%x>\n"
1861 " next_to_clean <%x>\n" 1863 " next_to_use <%x>\n"
1862 "buffer_info[next_to_clean]\n" 1864 " next_to_clean <%x>\n"
1863 " time_stamp <%lx>\n" 1865 "buffer_info[next_to_clean]\n"
1864 " next_to_watch <%x>\n" 1866 " time_stamp <%lx>\n"
1865 " jiffies <%lx>\n" 1867 " next_to_watch <%x>\n"
1866 " next_to_watch.status <%x>\n", 1868 " jiffies <%lx>\n"
1867 IXGB_READ_REG(&adapter->hw, TDH), 1869 " next_to_watch.status <%x>\n",
1868 IXGB_READ_REG(&adapter->hw, TDT), 1870 IXGB_READ_REG(&adapter->hw, TDH),
1869 tx_ring->next_to_use, 1871 IXGB_READ_REG(&adapter->hw, TDT),
1870 tx_ring->next_to_clean, 1872 tx_ring->next_to_use,
1871 tx_ring->buffer_info[eop].time_stamp, 1873 tx_ring->next_to_clean,
1872 eop, 1874 tx_ring->buffer_info[eop].time_stamp,
1873 jiffies, 1875 eop,
1874 eop_desc->status); 1876 jiffies,
1877 eop_desc->status);
1875 netif_stop_queue(netdev); 1878 netif_stop_queue(netdev);
1876 } 1879 }
1877 } 1880 }
@@ -2269,7 +2272,8 @@ static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
2269 struct ixgb_adapter *adapter = netdev_priv(netdev); 2272 struct ixgb_adapter *adapter = netdev_priv(netdev);
2270 2273
2271 if (pci_enable_device(pdev)) { 2274 if (pci_enable_device(pdev)) {
2272 DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n"); 2275 netif_err(adapter, probe, adapter->netdev,
2276 "Cannot re-enable PCI device after reset\n");
2273 return PCI_ERS_RESULT_DISCONNECT; 2277 return PCI_ERS_RESULT_DISCONNECT;
2274 } 2278 }
2275 2279
@@ -2285,14 +2289,16 @@ static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
2285 2289
2286 /* Make sure the EEPROM is good */ 2290 /* Make sure the EEPROM is good */
2287 if (!ixgb_validate_eeprom_checksum(&adapter->hw)) { 2291 if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
2288 DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n"); 2292 netif_err(adapter, probe, adapter->netdev,
2293 "After reset, the EEPROM checksum is not valid\n");
2289 return PCI_ERS_RESULT_DISCONNECT; 2294 return PCI_ERS_RESULT_DISCONNECT;
2290 } 2295 }
2291 ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); 2296 ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
2292 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); 2297 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
2293 2298
2294 if (!is_valid_ether_addr(netdev->perm_addr)) { 2299 if (!is_valid_ether_addr(netdev->perm_addr)) {
2295 DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n"); 2300 netif_err(adapter, probe, adapter->netdev,
2301 "After reset, invalid MAC address\n");
2296 return PCI_ERS_RESULT_DISCONNECT; 2302 return PCI_ERS_RESULT_DISCONNECT;
2297 } 2303 }
2298 2304
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index bfef0ebcba9a..8f81efb49169 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -33,7 +33,8 @@
33obj-$(CONFIG_IXGBE) += ixgbe.o 33obj-$(CONFIG_IXGBE) += ixgbe.o
34 34
35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ 35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o 36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
37 ixgbe_mbx.o
37 38
38ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ 39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
39 ixgbe_dcb_82599.o ixgbe_dcb_nl.o 40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 303e7bd39b67..19e94ee155a2 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -98,6 +98,22 @@
98 98
99#define IXGBE_MAX_RSC_INT_RATE 162760 99#define IXGBE_MAX_RSC_INT_RATE 162760
100 100
101#define IXGBE_MAX_VF_MC_ENTRIES 30
102#define IXGBE_MAX_VF_FUNCTIONS 64
103#define IXGBE_MAX_VFTA_ENTRIES 128
104#define MAX_EMULATION_MAC_ADDRS 16
105#define VMDQ_P(p) ((p) + adapter->num_vfs)
106
107struct vf_data_storage {
108 unsigned char vf_mac_addresses[ETH_ALEN];
109 u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
110 u16 num_vf_mc_hashes;
111 u16 default_vf_vlan_id;
112 u16 vlans_enabled;
113 bool clear_to_send;
114 int rar;
115};
116
101/* wrapper around a pointer to a socket buffer, 117/* wrapper around a pointer to a socket buffer,
102 * so a DMA handle can be stored along with the buffer */ 118 * so a DMA handle can be stored along with the buffer */
103struct ixgbe_tx_buffer { 119struct ixgbe_tx_buffer {
@@ -159,6 +175,7 @@ struct ixgbe_ring {
159 175
160 struct ixgbe_queue_stats stats; 176 struct ixgbe_queue_stats stats;
161 unsigned long reinit_state; 177 unsigned long reinit_state;
178 int numa_node;
162 u64 rsc_count; /* stat for coalesced packets */ 179 u64 rsc_count; /* stat for coalesced packets */
163 u64 rsc_flush; /* stats for flushed packets */ 180 u64 rsc_flush; /* stats for flushed packets */
164 u32 restart_queue; /* track tx queue restarts */ 181 u32 restart_queue; /* track tx queue restarts */
@@ -171,7 +188,7 @@ struct ixgbe_ring {
171enum ixgbe_ring_f_enum { 188enum ixgbe_ring_f_enum {
172 RING_F_NONE = 0, 189 RING_F_NONE = 0,
173 RING_F_DCB, 190 RING_F_DCB,
174 RING_F_VMDQ, 191 RING_F_VMDQ, /* SR-IOV uses the same ring feature */
175 RING_F_RSS, 192 RING_F_RSS,
176 RING_F_FDIR, 193 RING_F_FDIR,
177#ifdef IXGBE_FCOE 194#ifdef IXGBE_FCOE
@@ -183,7 +200,7 @@ enum ixgbe_ring_f_enum {
183 200
184#define IXGBE_MAX_DCB_INDICES 8 201#define IXGBE_MAX_DCB_INDICES 8
185#define IXGBE_MAX_RSS_INDICES 16 202#define IXGBE_MAX_RSS_INDICES 16
186#define IXGBE_MAX_VMDQ_INDICES 16 203#define IXGBE_MAX_VMDQ_INDICES 64
187#define IXGBE_MAX_FDIR_INDICES 64 204#define IXGBE_MAX_FDIR_INDICES 64
188#ifdef IXGBE_FCOE 205#ifdef IXGBE_FCOE
189#define IXGBE_MAX_FCOE_INDICES 8 206#define IXGBE_MAX_FCOE_INDICES 8
@@ -277,7 +294,7 @@ struct ixgbe_adapter {
277 u16 eitr_high; 294 u16 eitr_high;
278 295
279 /* TX */ 296 /* TX */
280 struct ixgbe_ring *tx_ring ____cacheline_aligned_in_smp; /* One per active queue */ 297 struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
281 int num_tx_queues; 298 int num_tx_queues;
282 u32 tx_timeout_count; 299 u32 tx_timeout_count;
283 bool detect_tx_hung; 300 bool detect_tx_hung;
@@ -286,8 +303,10 @@ struct ixgbe_adapter {
286 u64 lsc_int; 303 u64 lsc_int;
287 304
288 /* RX */ 305 /* RX */
289 struct ixgbe_ring *rx_ring ____cacheline_aligned_in_smp; /* One per active queue */ 306 struct ixgbe_ring *rx_ring[MAX_RX_QUEUES] ____cacheline_aligned_in_smp;
290 int num_rx_queues; 307 int num_rx_queues;
308 int num_rx_pools; /* == num_rx_queues in 82598 */
309 int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
291 u64 hw_csum_rx_error; 310 u64 hw_csum_rx_error;
292 u64 hw_rx_no_dma_resources; 311 u64 hw_rx_no_dma_resources;
293 u64 non_eop_descs; 312 u64 non_eop_descs;
@@ -323,13 +342,14 @@ struct ixgbe_adapter {
323#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19) 342#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
324#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20) 343#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20)
325#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22) 344#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
326#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23) 345#define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 23)
327#define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 24) 346#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 24)
328#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25) 347#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 25)
329#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 26) 348#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 26)
330#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27) 349#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 27)
331#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 28) 350#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 28)
332#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29) 351#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 29)
352#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 30)
333 353
334 u32 flags2; 354 u32 flags2;
335#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1) 355#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
@@ -379,6 +399,13 @@ struct ixgbe_adapter {
379 u64 rsc_total_flush; 399 u64 rsc_total_flush;
380 u32 wol; 400 u32 wol;
381 u16 eeprom_version; 401 u16 eeprom_version;
402
403 int node;
404
405 /* SR-IOV */
406 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
407 unsigned int num_vfs;
408 struct vf_data_storage *vfinfo;
382}; 409};
383 410
384enum ixbge_state_t { 411enum ixbge_state_t {
@@ -426,6 +453,10 @@ extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
426extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, 453extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
427 struct ixgbe_atr_input *input, 454 struct ixgbe_atr_input *input,
428 u8 queue); 455 u8 queue);
456extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
457 struct ixgbe_atr_input *input,
458 struct ixgbe_atr_input_masks *input_masks,
459 u16 soft_id, u8 queue);
429extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, 460extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input,
430 u16 vlan_id); 461 u16 vlan_id);
431extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, 462extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input,
@@ -440,6 +471,7 @@ extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
440 u16 flex_byte); 471 u16 flex_byte);
441extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, 472extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
442 u8 l4type); 473 u8 l4type);
474extern void ixgbe_set_rx_mode(struct net_device *netdev);
443#ifdef IXGBE_FCOE 475#ifdef IXGBE_FCOE
444extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); 476extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
445extern int ixgbe_fso(struct ixgbe_adapter *adapter, 477extern int ixgbe_fso(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index b49bd6b9feb7..1f30e163bd9c 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -31,6 +31,7 @@
31 31
32#include "ixgbe.h" 32#include "ixgbe.h"
33#include "ixgbe_phy.h" 33#include "ixgbe_phy.h"
34#include "ixgbe_mbx.h"
34 35
35#define IXGBE_82599_MAX_TX_QUEUES 128 36#define IXGBE_82599_MAX_TX_QUEUES 128
36#define IXGBE_82599_MAX_RX_QUEUES 128 37#define IXGBE_82599_MAX_RX_QUEUES 128
@@ -889,7 +890,7 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
889static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) 890static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
890{ 891{
891 s32 status = 0; 892 s32 status = 0;
892 u32 ctrl, ctrl_ext; 893 u32 ctrl;
893 u32 i; 894 u32 i;
894 u32 autoc; 895 u32 autoc;
895 u32 autoc2; 896 u32 autoc2;
@@ -944,15 +945,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
944 status = IXGBE_ERR_RESET_FAILED; 945 status = IXGBE_ERR_RESET_FAILED;
945 hw_dbg(hw, "Reset polling failed to complete.\n"); 946 hw_dbg(hw, "Reset polling failed to complete.\n");
946 } 947 }
947 /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
948 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
949 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
950 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
951 948
952 msleep(50); 949 msleep(50);
953 950
954
955
956 /* 951 /*
957 * Store the original AUTOC/AUTOC2 values if they have not been 952 * Store the original AUTOC/AUTOC2 values if they have not been
958 * stored off yet. Otherwise restore the stored original 953 * stored off yet. Otherwise restore the stored original
@@ -1095,9 +1090,11 @@ static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
1095 bool vlan_on) 1090 bool vlan_on)
1096{ 1091{
1097 u32 regindex; 1092 u32 regindex;
1093 u32 vlvf_index;
1098 u32 bitindex; 1094 u32 bitindex;
1099 u32 bits; 1095 u32 bits;
1100 u32 first_empty_slot; 1096 u32 first_empty_slot;
1097 u32 vt_ctl;
1101 1098
1102 if (vlan > 4095) 1099 if (vlan > 4095)
1103 return IXGBE_ERR_PARAM; 1100 return IXGBE_ERR_PARAM;
@@ -1124,76 +1121,84 @@ static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
1124 1121
1125 1122
1126 /* Part 2 1123 /* Part 2
1127 * If the vind is set 1124 * If VT mode is set
1128 * Either vlan_on 1125 * Either vlan_on
1129 * make sure the vlan is in VLVF 1126 * make sure the vlan is in VLVF
1130 * set the vind bit in the matching VLVFB 1127 * set the vind bit in the matching VLVFB
1131 * Or !vlan_on 1128 * Or !vlan_on
1132 * clear the pool bit and possibly the vind 1129 * clear the pool bit and possibly the vind
1133 */ 1130 */
1134 if (vind) { 1131 vt_ctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
1135 /* find the vlanid or the first empty slot */ 1132 if (!(vt_ctl & IXGBE_VT_CTL_VT_ENABLE))
1136 first_empty_slot = 0; 1133 goto out;
1137
1138 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
1139 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
1140 if (!bits && !first_empty_slot)
1141 first_empty_slot = regindex;
1142 else if ((bits & 0x0FFF) == vlan)
1143 break;
1144 }
1145 1134
1146 if (regindex >= IXGBE_VLVF_ENTRIES) { 1135 /* find the vlanid or the first empty slot */
1147 if (first_empty_slot) 1136 first_empty_slot = 0;
1148 regindex = first_empty_slot; 1137
1149 else { 1138 for (vlvf_index = 1; vlvf_index < IXGBE_VLVF_ENTRIES; vlvf_index++) {
1150 hw_dbg(hw, "No space in VLVF.\n"); 1139 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(vlvf_index));
1151 goto out; 1140 if (!bits && !first_empty_slot)
1152 } 1141 first_empty_slot = vlvf_index;
1142 else if ((bits & 0x0FFF) == vlan)
1143 break;
1144 }
1145
1146 if (vlvf_index >= IXGBE_VLVF_ENTRIES) {
1147 if (first_empty_slot)
1148 vlvf_index = first_empty_slot;
1149 else {
1150 hw_dbg(hw, "No space in VLVF.\n");
1151 goto out;
1153 } 1152 }
1153 }
1154 1154
1155 if (vlan_on) { 1155 if (vlan_on) {
1156 /* set the pool bit */ 1156 /* set the pool bit */
1157 if (vind < 32) { 1157 if (vind < 32) {
1158 bits = IXGBE_READ_REG(hw, 1158 bits = IXGBE_READ_REG(hw,
1159 IXGBE_VLVFB(regindex * 2)); 1159 IXGBE_VLVFB(vlvf_index * 2));
1160 bits |= (1 << vind); 1160 bits |= (1 << vind);
1161 IXGBE_WRITE_REG(hw, 1161 IXGBE_WRITE_REG(hw,
1162 IXGBE_VLVFB(regindex * 2), bits); 1162 IXGBE_VLVFB(vlvf_index * 2), bits);
1163 } else {
1164 bits = IXGBE_READ_REG(hw,
1165 IXGBE_VLVFB((regindex * 2) + 1));
1166 bits |= (1 << vind);
1167 IXGBE_WRITE_REG(hw,
1168 IXGBE_VLVFB((regindex * 2) + 1), bits);
1169 }
1170 } else { 1163 } else {
1171 /* clear the pool bit */ 1164 bits = IXGBE_READ_REG(hw,
1172 if (vind < 32) { 1165 IXGBE_VLVFB((vlvf_index * 2) + 1));
1173 bits = IXGBE_READ_REG(hw, 1166 bits |= (1 << (vind - 32));
1174 IXGBE_VLVFB(regindex * 2)); 1167 IXGBE_WRITE_REG(hw,
1168 IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
1169 }
1170 } else {
1171 /* clear the pool bit */
1172 if (vind < 32) {
1173 bits = IXGBE_READ_REG(hw,
1174 IXGBE_VLVFB(vlvf_index * 2));
1175 bits &= ~(1 << vind); 1175 bits &= ~(1 << vind);
1176 IXGBE_WRITE_REG(hw, 1176 IXGBE_WRITE_REG(hw,
1177 IXGBE_VLVFB(regindex * 2), bits); 1177 IXGBE_VLVFB(vlvf_index * 2), bits);
1178 bits |= IXGBE_READ_REG(hw, 1178 bits |= IXGBE_READ_REG(hw,
1179 IXGBE_VLVFB((regindex * 2) + 1)); 1179 IXGBE_VLVFB((vlvf_index * 2) + 1));
1180 } else { 1180 } else {
1181 bits = IXGBE_READ_REG(hw, 1181 bits = IXGBE_READ_REG(hw,
1182 IXGBE_VLVFB((regindex * 2) + 1)); 1182 IXGBE_VLVFB((vlvf_index * 2) + 1));
1183 bits &= ~(1 << vind); 1183 bits &= ~(1 << (vind - 32));
1184 IXGBE_WRITE_REG(hw, 1184 IXGBE_WRITE_REG(hw,
1185 IXGBE_VLVFB((regindex * 2) + 1), bits); 1185 IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
1186 bits |= IXGBE_READ_REG(hw, 1186 bits |= IXGBE_READ_REG(hw,
1187 IXGBE_VLVFB(regindex * 2)); 1187 IXGBE_VLVFB(vlvf_index * 2));
1188 }
1189 } 1188 }
1189 }
1190 1190
1191 if (bits) 1191 if (bits) {
1192 IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 1192 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
1193 (IXGBE_VLVF_VIEN | vlan)); 1193 (IXGBE_VLVF_VIEN | vlan));
1194 else 1194 /* if bits is non-zero then some pools/VFs are still
1195 IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 0); 1195 * using this VLAN ID. Force the VFTA entry to on */
1196 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1197 bits |= (1 << bitindex);
1198 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1196 } 1199 }
1200 else
1201 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
1197 1202
1198out: 1203out:
1199 return 0; 1204 return 0;
@@ -1434,6 +1439,9 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
1434 /* Send interrupt when 64 filters are left */ 1439 /* Send interrupt when 64 filters are left */
1435 fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT; 1440 fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
1436 1441
1442 /* Initialize the drop queue to Rx queue 127 */
1443 fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
1444
1437 switch (pballoc) { 1445 switch (pballoc) {
1438 case IXGBE_FDIR_PBALLOC_64K: 1446 case IXGBE_FDIR_PBALLOC_64K:
1439 /* 2k - 1 perfect filters */ 1447 /* 2k - 1 perfect filters */
@@ -1675,8 +1683,8 @@ s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
1675 * @src_addr_4: the fourth 4 bytes of the IP address to load 1683 * @src_addr_4: the fourth 4 bytes of the IP address to load
1676 **/ 1684 **/
1677s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input, 1685s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
1678 u32 src_addr_1, u32 src_addr_2, 1686 u32 src_addr_1, u32 src_addr_2,
1679 u32 src_addr_3, u32 src_addr_4) 1687 u32 src_addr_3, u32 src_addr_4)
1680{ 1688{
1681 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff; 1689 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
1682 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] = 1690 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
@@ -1718,8 +1726,8 @@ s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
1718 * @dst_addr_4: the fourth 4 bytes of the IP address to load 1726 * @dst_addr_4: the fourth 4 bytes of the IP address to load
1719 **/ 1727 **/
1720s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input, 1728s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
1721 u32 dst_addr_1, u32 dst_addr_2, 1729 u32 dst_addr_1, u32 dst_addr_2,
1722 u32 dst_addr_3, u32 dst_addr_4) 1730 u32 dst_addr_3, u32 dst_addr_4)
1723{ 1731{
1724 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff; 1732 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
1725 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] = 1733 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
@@ -1797,7 +1805,7 @@ s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
1797 * @vm_pool: the Virtual Machine pool to load 1805 * @vm_pool: the Virtual Machine pool to load
1798 **/ 1806 **/
1799s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, 1807s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input,
1800 u8 vm_pool) 1808 u8 vm_pool)
1801{ 1809{
1802 input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool; 1810 input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
1803 1811
@@ -1821,8 +1829,7 @@ s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
1821 * @input: input stream to search 1829 * @input: input stream to search
1822 * @vlan: the VLAN id to load 1830 * @vlan: the VLAN id to load
1823 **/ 1831 **/
1824static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, 1832static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
1825 u16 *vlan)
1826{ 1833{
1827 *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET]; 1834 *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
1828 *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8; 1835 *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
@@ -2078,23 +2085,26 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
2078 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter 2085 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
2079 * @hw: pointer to hardware structure 2086 * @hw: pointer to hardware structure
2080 * @input: input bitstream 2087 * @input: input bitstream
2088 * @input_masks: bitwise masks for relevant fields
2089 * @soft_id: software index into the silicon hash tables for filter storage
2081 * @queue: queue index to direct traffic to 2090 * @queue: queue index to direct traffic to
2082 * 2091 *
2083 * Note that the caller to this function must lock before calling, since the 2092 * Note that the caller to this function must lock before calling, since the
2084 * hardware writes must be protected from one another. 2093 * hardware writes must be protected from one another.
2085 **/ 2094 **/
2086s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, 2095s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
2087 struct ixgbe_atr_input *input, 2096 struct ixgbe_atr_input *input,
2088 u16 soft_id, 2097 struct ixgbe_atr_input_masks *input_masks,
2089 u8 queue) 2098 u16 soft_id, u8 queue)
2090{ 2099{
2091 u32 fdircmd = 0; 2100 u32 fdircmd = 0;
2092 u32 fdirhash; 2101 u32 fdirhash;
2093 u32 src_ipv4, dst_ipv4; 2102 u32 src_ipv4 = 0, dst_ipv4 = 0;
2094 u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4; 2103 u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
2095 u16 src_port, dst_port, vlan_id, flex_bytes; 2104 u16 src_port, dst_port, vlan_id, flex_bytes;
2096 u16 bucket_hash; 2105 u16 bucket_hash;
2097 u8 l4type; 2106 u8 l4type;
2107 u8 fdirm = 0;
2098 2108
2099 /* Get our input values */ 2109 /* Get our input values */
2100 ixgbe_atr_get_l4type_82599(input, &l4type); 2110 ixgbe_atr_get_l4type_82599(input, &l4type);
@@ -2149,7 +2159,6 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
2149 /* IPv4 */ 2159 /* IPv4 */
2150 ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4); 2160 ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
2151 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4); 2161 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
2152
2153 } 2162 }
2154 2163
2155 ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4); 2164 ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
@@ -2158,7 +2167,78 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
2158 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id | 2167 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
2159 (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT))); 2168 (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
2160 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port | 2169 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
2161 (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT))); 2170 (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
2171
2172 /*
2173 * Program the relevant mask registers. If src/dst_port or src/dst_addr
2174 * are zero, then assume a full mask for that field. Also assume that
2175 * a VLAN of 0 is unspecified, so mask that out as well. L4type
2176 * cannot be masked out in this implementation.
2177 *
2178 * This also assumes IPv4 only. IPv6 masking isn't supported at this
2179 * point in time.
2180 */
2181 if (src_ipv4 == 0)
2182 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xffffffff);
2183 else
2184 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
2185
2186 if (dst_ipv4 == 0)
2187 IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xffffffff);
2188 else
2189 IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
2190
2191 switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
2192 case IXGBE_ATR_L4TYPE_TCP:
2193 if (src_port == 0)
2194 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xffff);
2195 else
2196 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
2197 input_masks->src_port_mask);
2198
2199 if (dst_port == 0)
2200 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
2201 (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
2202 (0xffff << 16)));
2203 else
2204 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
2205 (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
2206 (input_masks->dst_port_mask << 16)));
2207 break;
2208 case IXGBE_ATR_L4TYPE_UDP:
2209 if (src_port == 0)
2210 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xffff);
2211 else
2212 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
2213 input_masks->src_port_mask);
2214
2215 if (dst_port == 0)
2216 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
2217 (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
2218 (0xffff << 16)));
2219 else
2220 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
2221 (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
2222 (input_masks->src_port_mask << 16)));
2223 break;
2224 default:
2225 /* this already would have failed above */
2226 break;
2227 }
2228
2229 /* Program the last mask register, FDIRM */
2230 if (input_masks->vlan_id_mask || !vlan_id)
2231 /* Mask both VLAN and VLANP - bits 0 and 1 */
2232 fdirm |= 0x3;
2233
2234 if (input_masks->data_mask || !flex_bytes)
2235 /* Flex bytes need masking, so mask the whole thing - bit 4 */
2236 fdirm |= 0x10;
2237
2238 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
2239 fdirm |= 0x24;
2240
2241 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
2162 2242
2163 fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW; 2243 fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
2164 fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE; 2244 fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
@@ -2655,4 +2735,5 @@ struct ixgbe_info ixgbe_82599_info = {
2655 .mac_ops = &mac_ops_82599, 2735 .mac_ops = &mac_ops_82599,
2656 .eeprom_ops = &eeprom_ops_82599, 2736 .eeprom_ops = &eeprom_ops_82599,
2657 .phy_ops = &phy_ops_82599, 2737 .phy_ops = &phy_ops_82599,
2738 .mbx_ops = &mbx_ops_82599,
2658}; 2739};
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 21f158f79dd0..eb49020903c1 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -28,7 +28,6 @@
28#include <linux/pci.h> 28#include <linux/pci.h>
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/sched.h> 30#include <linux/sched.h>
31#include <linux/list.h>
32#include <linux/netdevice.h> 31#include <linux/netdevice.h>
33 32
34#include "ixgbe.h" 33#include "ixgbe.h"
@@ -1278,19 +1277,11 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1278 /* Get the MAC address from the RAR0 for later reference */ 1277 /* Get the MAC address from the RAR0 for later reference */
1279 hw->mac.ops.get_mac_addr(hw, hw->mac.addr); 1278 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
1280 1279
1281 hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ", 1280 hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
1282 hw->mac.addr[0], hw->mac.addr[1],
1283 hw->mac.addr[2]);
1284 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
1285 hw->mac.addr[4], hw->mac.addr[5]);
1286 } else { 1281 } else {
1287 /* Setup the receive address. */ 1282 /* Setup the receive address. */
1288 hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); 1283 hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
1289 hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ", 1284 hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
1290 hw->mac.addr[0], hw->mac.addr[1],
1291 hw->mac.addr[2]);
1292 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
1293 hw->mac.addr[4], hw->mac.addr[5]);
1294 1285
1295 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 1286 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1296 } 1287 }
@@ -1355,7 +1346,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1355/** 1346/**
1356 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses 1347 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
1357 * @hw: pointer to hardware structure 1348 * @hw: pointer to hardware structure
1358 * @uc_list: the list of new addresses 1349 * @netdev: pointer to net device structure
1359 * 1350 *
1360 * The given list replaces any existing list. Clears the secondary addrs from 1351 * The given list replaces any existing list. Clears the secondary addrs from
1361 * receive address registers. Uses unused receive address registers for the 1352 * receive address registers. Uses unused receive address registers for the
@@ -1365,7 +1356,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1365 * manually putting the device into promiscuous mode. 1356 * manually putting the device into promiscuous mode.
1366 **/ 1357 **/
1367s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, 1358s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
1368 struct list_head *uc_list) 1359 struct net_device *netdev)
1369{ 1360{
1370 u32 i; 1361 u32 i;
1371 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; 1362 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
@@ -1389,7 +1380,7 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
1389 } 1380 }
1390 1381
1391 /* Add the new addresses */ 1382 /* Add the new addresses */
1392 list_for_each_entry(ha, uc_list, list) { 1383 netdev_for_each_uc_addr(ha, netdev) {
1393 hw_dbg(hw, " Adding the secondary addresses:\n"); 1384 hw_dbg(hw, " Adding the secondary addresses:\n");
1394 ixgbe_add_uc_addr(hw, ha->addr, 0); 1385 ixgbe_add_uc_addr(hw, ha->addr, 0);
1395 } 1386 }
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index dfff0ffaa502..13606d4809c9 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -60,7 +60,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
60 u32 mc_addr_count, 60 u32 mc_addr_count,
61 ixgbe_mc_addr_itr func); 61 ixgbe_mc_addr_itr func);
62s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, 62s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
63 struct list_head *uc_list); 63 struct net_device *netdev);
64s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); 64s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
65s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); 65s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
66s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); 66s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index d77961fc75f9..7949a446e4c7 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -441,10 +441,8 @@ static int ixgbe_set_tso(struct net_device *netdev, u32 data)
441 netdev->features |= NETIF_F_TSO; 441 netdev->features |= NETIF_F_TSO;
442 netdev->features |= NETIF_F_TSO6; 442 netdev->features |= NETIF_F_TSO6;
443 } else { 443 } else {
444 netif_tx_stop_all_queues(netdev);
445 netdev->features &= ~NETIF_F_TSO; 444 netdev->features &= ~NETIF_F_TSO;
446 netdev->features &= ~NETIF_F_TSO6; 445 netdev->features &= ~NETIF_F_TSO6;
447 netif_tx_start_all_queues(netdev);
448 } 446 }
449 return 0; 447 return 0;
450} 448}
@@ -834,8 +832,8 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
834 struct ethtool_ringparam *ring) 832 struct ethtool_ringparam *ring)
835{ 833{
836 struct ixgbe_adapter *adapter = netdev_priv(netdev); 834 struct ixgbe_adapter *adapter = netdev_priv(netdev);
837 struct ixgbe_ring *tx_ring = adapter->tx_ring; 835 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
838 struct ixgbe_ring *rx_ring = adapter->rx_ring; 836 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
839 837
840 ring->rx_max_pending = IXGBE_MAX_RXD; 838 ring->rx_max_pending = IXGBE_MAX_RXD;
841 ring->tx_max_pending = IXGBE_MAX_TXD; 839 ring->tx_max_pending = IXGBE_MAX_TXD;
@@ -867,8 +865,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
867 new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD); 865 new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
868 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); 866 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
869 867
870 if ((new_tx_count == adapter->tx_ring->count) && 868 if ((new_tx_count == adapter->tx_ring[0]->count) &&
871 (new_rx_count == adapter->rx_ring->count)) { 869 (new_rx_count == adapter->rx_ring[0]->count)) {
872 /* nothing to do */ 870 /* nothing to do */
873 return 0; 871 return 0;
874 } 872 }
@@ -878,25 +876,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
878 876
879 if (!netif_running(adapter->netdev)) { 877 if (!netif_running(adapter->netdev)) {
880 for (i = 0; i < adapter->num_tx_queues; i++) 878 for (i = 0; i < adapter->num_tx_queues; i++)
881 adapter->tx_ring[i].count = new_tx_count; 879 adapter->tx_ring[i]->count = new_tx_count;
882 for (i = 0; i < adapter->num_rx_queues; i++) 880 for (i = 0; i < adapter->num_rx_queues; i++)
883 adapter->rx_ring[i].count = new_rx_count; 881 adapter->rx_ring[i]->count = new_rx_count;
884 adapter->tx_ring_count = new_tx_count; 882 adapter->tx_ring_count = new_tx_count;
885 adapter->rx_ring_count = new_rx_count; 883 adapter->rx_ring_count = new_rx_count;
886 goto err_setup; 884 goto clear_reset;
887 } 885 }
888 886
889 temp_tx_ring = kcalloc(adapter->num_tx_queues, 887 temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
890 sizeof(struct ixgbe_ring), GFP_KERNEL);
891 if (!temp_tx_ring) { 888 if (!temp_tx_ring) {
892 err = -ENOMEM; 889 err = -ENOMEM;
893 goto err_setup; 890 goto clear_reset;
894 } 891 }
895 892
896 if (new_tx_count != adapter->tx_ring_count) { 893 if (new_tx_count != adapter->tx_ring_count) {
897 memcpy(temp_tx_ring, adapter->tx_ring,
898 adapter->num_tx_queues * sizeof(struct ixgbe_ring));
899 for (i = 0; i < adapter->num_tx_queues; i++) { 894 for (i = 0; i < adapter->num_tx_queues; i++) {
895 memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
896 sizeof(struct ixgbe_ring));
900 temp_tx_ring[i].count = new_tx_count; 897 temp_tx_ring[i].count = new_tx_count;
901 err = ixgbe_setup_tx_resources(adapter, 898 err = ixgbe_setup_tx_resources(adapter,
902 &temp_tx_ring[i]); 899 &temp_tx_ring[i]);
@@ -904,28 +901,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
904 while (i) { 901 while (i) {
905 i--; 902 i--;
906 ixgbe_free_tx_resources(adapter, 903 ixgbe_free_tx_resources(adapter,
907 &temp_tx_ring[i]); 904 &temp_tx_ring[i]);
908 } 905 }
909 goto err_setup; 906 goto clear_reset;
910 } 907 }
911 } 908 }
912 need_update = true; 909 need_update = true;
913 } 910 }
914 911
915 temp_rx_ring = kcalloc(adapter->num_rx_queues, 912 temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
916 sizeof(struct ixgbe_ring), GFP_KERNEL); 913 if (!temp_rx_ring) {
917 if ((!temp_rx_ring) && (need_update)) {
918 for (i = 0; i < adapter->num_tx_queues; i++)
919 ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]);
920 kfree(temp_tx_ring);
921 err = -ENOMEM; 914 err = -ENOMEM;
922 goto err_setup; 915 goto err_setup;
923 } 916 }
924 917
925 if (new_rx_count != adapter->rx_ring_count) { 918 if (new_rx_count != adapter->rx_ring_count) {
926 memcpy(temp_rx_ring, adapter->rx_ring,
927 adapter->num_rx_queues * sizeof(struct ixgbe_ring));
928 for (i = 0; i < adapter->num_rx_queues; i++) { 919 for (i = 0; i < adapter->num_rx_queues; i++) {
920 memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
921 sizeof(struct ixgbe_ring));
929 temp_rx_ring[i].count = new_rx_count; 922 temp_rx_ring[i].count = new_rx_count;
930 err = ixgbe_setup_rx_resources(adapter, 923 err = ixgbe_setup_rx_resources(adapter,
931 &temp_rx_ring[i]); 924 &temp_rx_ring[i]);
@@ -947,22 +940,32 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
947 940
948 /* tx */ 941 /* tx */
949 if (new_tx_count != adapter->tx_ring_count) { 942 if (new_tx_count != adapter->tx_ring_count) {
950 kfree(adapter->tx_ring); 943 for (i = 0; i < adapter->num_tx_queues; i++) {
951 adapter->tx_ring = temp_tx_ring; 944 ixgbe_free_tx_resources(adapter,
952 temp_tx_ring = NULL; 945 adapter->tx_ring[i]);
946 memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
947 sizeof(struct ixgbe_ring));
948 }
953 adapter->tx_ring_count = new_tx_count; 949 adapter->tx_ring_count = new_tx_count;
954 } 950 }
955 951
956 /* rx */ 952 /* rx */
957 if (new_rx_count != adapter->rx_ring_count) { 953 if (new_rx_count != adapter->rx_ring_count) {
958 kfree(adapter->rx_ring); 954 for (i = 0; i < adapter->num_rx_queues; i++) {
959 adapter->rx_ring = temp_rx_ring; 955 ixgbe_free_rx_resources(adapter,
960 temp_rx_ring = NULL; 956 adapter->rx_ring[i]);
957 memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
958 sizeof(struct ixgbe_ring));
959 }
961 adapter->rx_ring_count = new_rx_count; 960 adapter->rx_ring_count = new_rx_count;
962 } 961 }
963 ixgbe_up(adapter); 962 ixgbe_up(adapter);
964 } 963 }
964
965 vfree(temp_rx_ring);
965err_setup: 966err_setup:
967 vfree(temp_tx_ring);
968clear_reset:
966 clear_bit(__IXGBE_RESETTING, &adapter->state); 969 clear_bit(__IXGBE_RESETTING, &adapter->state);
967 return err; 970 return err;
968} 971}
@@ -974,6 +977,9 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
974 return IXGBE_TEST_LEN; 977 return IXGBE_TEST_LEN;
975 case ETH_SS_STATS: 978 case ETH_SS_STATS:
976 return IXGBE_STATS_LEN; 979 return IXGBE_STATS_LEN;
980 case ETH_SS_NTUPLE_FILTERS:
981 return (ETHTOOL_MAX_NTUPLE_LIST_ENTRY *
982 ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY);
977 default: 983 default:
978 return -EOPNOTSUPP; 984 return -EOPNOTSUPP;
979 } 985 }
@@ -1007,13 +1013,13 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1007 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1013 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1008 } 1014 }
1009 for (j = 0; j < adapter->num_tx_queues; j++) { 1015 for (j = 0; j < adapter->num_tx_queues; j++) {
1010 queue_stat = (u64 *)&adapter->tx_ring[j].stats; 1016 queue_stat = (u64 *)&adapter->tx_ring[j]->stats;
1011 for (k = 0; k < stat_count; k++) 1017 for (k = 0; k < stat_count; k++)
1012 data[i + k] = queue_stat[k]; 1018 data[i + k] = queue_stat[k];
1013 i += k; 1019 i += k;
1014 } 1020 }
1015 for (j = 0; j < adapter->num_rx_queues; j++) { 1021 for (j = 0; j < adapter->num_rx_queues; j++) {
1016 queue_stat = (u64 *)&adapter->rx_ring[j].stats; 1022 queue_stat = (u64 *)&adapter->rx_ring[j]->stats;
1017 for (k = 0; k < stat_count; k++) 1023 for (k = 0; k < stat_count; k++)
1018 data[i + k] = queue_stat[k]; 1024 data[i + k] = queue_stat[k];
1019 i += k; 1025 i += k;
@@ -1627,7 +1633,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1627 reg_data |= IXGBE_RXDCTL_ENABLE; 1633 reg_data |= IXGBE_RXDCTL_ENABLE;
1628 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data); 1634 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
1629 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1635 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1630 int j = adapter->rx_ring[0].reg_idx; 1636 int j = adapter->rx_ring[0]->reg_idx;
1631 u32 k; 1637 u32 k;
1632 for (k = 0; k < 10; k++) { 1638 for (k = 0; k < 10; k++) {
1633 if (IXGBE_READ_REG(&adapter->hw, 1639 if (IXGBE_READ_REG(&adapter->hw,
@@ -1867,11 +1873,22 @@ static void ixgbe_diag_test(struct net_device *netdev,
1867 if (ixgbe_intr_test(adapter, &data[2])) 1873 if (ixgbe_intr_test(adapter, &data[2]))
1868 eth_test->flags |= ETH_TEST_FL_FAILED; 1874 eth_test->flags |= ETH_TEST_FL_FAILED;
1869 1875
1876 /* If SRIOV or VMDq is enabled then skip MAC
1877 * loopback diagnostic. */
1878 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
1879 IXGBE_FLAG_VMDQ_ENABLED)) {
1880 DPRINTK(HW, INFO, "Skip MAC loopback diagnostic in VT "
1881 "mode\n");
1882 data[3] = 0;
1883 goto skip_loopback;
1884 }
1885
1870 ixgbe_reset(adapter); 1886 ixgbe_reset(adapter);
1871 DPRINTK(HW, INFO, "loopback testing starting\n"); 1887 DPRINTK(HW, INFO, "loopback testing starting\n");
1872 if (ixgbe_loopback_test(adapter, &data[3])) 1888 if (ixgbe_loopback_test(adapter, &data[3]))
1873 eth_test->flags |= ETH_TEST_FL_FAILED; 1889 eth_test->flags |= ETH_TEST_FL_FAILED;
1874 1890
1891skip_loopback:
1875 ixgbe_reset(adapter); 1892 ixgbe_reset(adapter);
1876 1893
1877 clear_bit(__IXGBE_TESTING, &adapter->state); 1894 clear_bit(__IXGBE_TESTING, &adapter->state);
@@ -2000,7 +2017,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
2000{ 2017{
2001 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2018 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2002 2019
2003 ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit; 2020 ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit;
2004 2021
2005 /* only valid if in constant ITR mode */ 2022 /* only valid if in constant ITR mode */
2006 switch (adapter->rx_itr_setting) { 2023 switch (adapter->rx_itr_setting) {
@@ -2053,7 +2070,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2053 return -EINVAL; 2070 return -EINVAL;
2054 2071
2055 if (ec->tx_max_coalesced_frames_irq) 2072 if (ec->tx_max_coalesced_frames_irq)
2056 adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq; 2073 adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
2057 2074
2058 if (ec->rx_coalesce_usecs > 1) { 2075 if (ec->rx_coalesce_usecs > 1) {
2059 /* check the limits */ 2076 /* check the limits */
@@ -2134,23 +2151,124 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2134static int ixgbe_set_flags(struct net_device *netdev, u32 data) 2151static int ixgbe_set_flags(struct net_device *netdev, u32 data)
2135{ 2152{
2136 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2153 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2154 bool need_reset = false;
2137 2155
2138 ethtool_op_set_flags(netdev, data); 2156 ethtool_op_set_flags(netdev, data);
2139 2157
2140 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
2141 return 0;
2142
2143 /* if state changes we need to update adapter->flags and reset */ 2158 /* if state changes we need to update adapter->flags and reset */
2144 if ((!!(data & ETH_FLAG_LRO)) != 2159 if ((!!(data & ETH_FLAG_LRO)) !=
2145 (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) { 2160 (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
2146 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED; 2161 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
2162 need_reset = true;
2163 }
2164
2165 /*
2166 * Check if Flow Director n-tuple support was enabled or disabled. If
2167 * the state changed, we need to reset.
2168 */
2169 if ((adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) &&
2170 (!(data & ETH_FLAG_NTUPLE))) {
2171 /* turn off Flow Director perfect, set hash and reset */
2172 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
2173 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
2174 need_reset = true;
2175 } else if ((!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) &&
2176 (data & ETH_FLAG_NTUPLE)) {
2177 /* turn off Flow Director hash, enable perfect and reset */
2178 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
2179 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
2180 need_reset = true;
2181 } else {
2182 /* no state change */
2183 }
2184
2185 if (need_reset) {
2147 if (netif_running(netdev)) 2186 if (netif_running(netdev))
2148 ixgbe_reinit_locked(adapter); 2187 ixgbe_reinit_locked(adapter);
2149 else 2188 else
2150 ixgbe_reset(adapter); 2189 ixgbe_reset(adapter);
2151 } 2190 }
2191
2152 return 0; 2192 return 0;
2193}
2153 2194
2195static int ixgbe_set_rx_ntuple(struct net_device *dev,
2196 struct ethtool_rx_ntuple *cmd)
2197{
2198 struct ixgbe_adapter *adapter = netdev_priv(dev);
2199 struct ethtool_rx_ntuple_flow_spec fs = cmd->fs;
2200 struct ixgbe_atr_input input_struct;
2201 struct ixgbe_atr_input_masks input_masks;
2202 int target_queue;
2203
2204 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2205 return -EOPNOTSUPP;
2206
2207 /*
2208 * Don't allow programming if the action is a queue greater than
2209 * the number of online Tx queues.
2210 */
2211 if ((fs.action >= adapter->num_tx_queues) ||
2212 (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP))
2213 return -EINVAL;
2214
2215 memset(&input_struct, 0, sizeof(struct ixgbe_atr_input));
2216 memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
2217
2218 input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src;
2219 input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst;
2220 input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc;
2221 input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst;
2222 input_masks.vlan_id_mask = fs.vlan_tag_mask;
2223 /* only use the lowest 2 bytes for flex bytes */
2224 input_masks.data_mask = (fs.data_mask & 0xffff);
2225
2226 switch (fs.flow_type) {
2227 case TCP_V4_FLOW:
2228 ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP);
2229 break;
2230 case UDP_V4_FLOW:
2231 ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP);
2232 break;
2233 case SCTP_V4_FLOW:
2234 ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP);
2235 break;
2236 default:
2237 return -1;
2238 }
2239
2240 /* Mask bits from the inputs based on user-supplied mask */
2241 ixgbe_atr_set_src_ipv4_82599(&input_struct,
2242 (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src));
2243 ixgbe_atr_set_dst_ipv4_82599(&input_struct,
2244 (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst));
2245 /* 82599 expects these to be byte-swapped for perfect filtering */
2246 ixgbe_atr_set_src_port_82599(&input_struct,
2247 ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc));
2248 ixgbe_atr_set_dst_port_82599(&input_struct,
2249 ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst));
2250
2251 /* VLAN and Flex bytes are either completely masked or not */
2252 if (!fs.vlan_tag_mask)
2253 ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag);
2254
2255 if (!input_masks.data_mask)
2256 /* make sure we only use the first 2 bytes of user data */
2257 ixgbe_atr_set_flex_byte_82599(&input_struct,
2258 (fs.data & 0xffff));
2259
2260 /* determine if we need to drop or route the packet */
2261 if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
2262 target_queue = MAX_RX_QUEUES - 1;
2263 else
2264 target_queue = fs.action;
2265
2266 spin_lock(&adapter->fdir_perfect_lock);
2267 ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct,
2268 &input_masks, 0, target_queue);
2269 spin_unlock(&adapter->fdir_perfect_lock);
2270
2271 return 0;
2154} 2272}
2155 2273
2156static const struct ethtool_ops ixgbe_ethtool_ops = { 2274static const struct ethtool_ops ixgbe_ethtool_ops = {
@@ -2188,6 +2306,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
2188 .set_coalesce = ixgbe_set_coalesce, 2306 .set_coalesce = ixgbe_set_coalesce,
2189 .get_flags = ethtool_op_get_flags, 2307 .get_flags = ethtool_op_get_flags,
2190 .set_flags = ixgbe_set_flags, 2308 .set_flags = ixgbe_set_flags,
2309 .set_rx_ntuple = ixgbe_set_rx_ntuple,
2191}; 2310};
2192 2311
2193void ixgbe_set_ethtool_ops(struct net_device *netdev) 2312void ixgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index e9a20c88c155..4123dec0dfb7 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -525,7 +525,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
525 for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { 525 for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
526 fcoe_i = f->mask + i % f->indices; 526 fcoe_i = f->mask + i % f->indices;
527 fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; 527 fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
528 fcoe_q = adapter->rx_ring[fcoe_i].reg_idx; 528 fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
529 IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); 529 IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
530 } 530 }
531 IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); 531 IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
@@ -533,7 +533,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
533 } else { 533 } else {
534 /* Use single rx queue for FCoE */ 534 /* Use single rx queue for FCoE */
535 fcoe_i = f->mask; 535 fcoe_i = f->mask;
536 fcoe_q = adapter->rx_ring[fcoe_i].reg_idx; 536 fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
537 IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0); 537 IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
538 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 538 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
539 IXGBE_ETQS_QUEUE_EN | 539 IXGBE_ETQS_QUEUE_EN |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 951b73cf5ca2..45e3532b166f 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -45,12 +45,13 @@
45#include "ixgbe.h" 45#include "ixgbe.h"
46#include "ixgbe_common.h" 46#include "ixgbe_common.h"
47#include "ixgbe_dcb_82599.h" 47#include "ixgbe_dcb_82599.h"
48#include "ixgbe_sriov.h"
48 49
49char ixgbe_driver_name[] = "ixgbe"; 50char ixgbe_driver_name[] = "ixgbe";
50static const char ixgbe_driver_string[] = 51static const char ixgbe_driver_string[] =
51 "Intel(R) 10 Gigabit PCI Express Network Driver"; 52 "Intel(R) 10 Gigabit PCI Express Network Driver";
52 53
53#define DRV_VERSION "2.0.44-k2" 54#define DRV_VERSION "2.0.62-k2"
54const char ixgbe_driver_version[] = DRV_VERSION; 55const char ixgbe_driver_version[] = DRV_VERSION;
55static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; 56static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
56 57
@@ -67,7 +68,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
67 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 68 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
68 * Class, Class Mask, private data (not used) } 69 * Class, Class Mask, private data (not used) }
69 */ 70 */
70static struct pci_device_id ixgbe_pci_tbl[] = { 71static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
71 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), 72 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
72 board_82598 }, 73 board_82598 },
73 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), 74 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
@@ -124,6 +125,13 @@ static struct notifier_block dca_notifier = {
124}; 125};
125#endif 126#endif
126 127
128#ifdef CONFIG_PCI_IOV
129static unsigned int max_vfs;
130module_param(max_vfs, uint, 0);
131MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
132 "per physical function");
133#endif /* CONFIG_PCI_IOV */
134
127MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 135MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
128MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); 136MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
129MODULE_LICENSE("GPL"); 137MODULE_LICENSE("GPL");
@@ -131,6 +139,41 @@ MODULE_VERSION(DRV_VERSION);
131 139
132#define DEFAULT_DEBUG_LEVEL_SHIFT 3 140#define DEFAULT_DEBUG_LEVEL_SHIFT 3
133 141
142static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
143{
144 struct ixgbe_hw *hw = &adapter->hw;
145 u32 gcr;
146 u32 gpie;
147 u32 vmdctl;
148
149#ifdef CONFIG_PCI_IOV
150 /* disable iov and allow time for transactions to clear */
151 pci_disable_sriov(adapter->pdev);
152#endif
153
154 /* turn off device IOV mode */
155 gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
156 gcr &= ~(IXGBE_GCR_EXT_SRIOV);
157 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
158 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
159 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
160 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
161
162 /* set default pool back to 0 */
163 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
164 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
165 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
166
167 /* take a breather then clean up driver data */
168 msleep(100);
169 if (adapter->vfinfo)
170 kfree(adapter->vfinfo);
171 adapter->vfinfo = NULL;
172
173 adapter->num_vfs = 0;
174 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
175}
176
134static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) 177static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
135{ 178{
136 u32 ctrl_ext; 179 u32 ctrl_ext;
@@ -451,7 +494,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
451{ 494{
452 u32 rxctrl; 495 u32 rxctrl;
453 int cpu = get_cpu(); 496 int cpu = get_cpu();
454 int q = rx_ring - adapter->rx_ring; 497 int q = rx_ring->reg_idx;
455 498
456 if (rx_ring->cpu != cpu) { 499 if (rx_ring->cpu != cpu) {
457 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q)); 500 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
@@ -479,7 +522,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
479{ 522{
480 u32 txctrl; 523 u32 txctrl;
481 int cpu = get_cpu(); 524 int cpu = get_cpu();
482 int q = tx_ring - adapter->tx_ring; 525 int q = tx_ring->reg_idx;
483 struct ixgbe_hw *hw = &adapter->hw; 526 struct ixgbe_hw *hw = &adapter->hw;
484 527
485 if (tx_ring->cpu != cpu) { 528 if (tx_ring->cpu != cpu) {
@@ -513,12 +556,12 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
513 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); 556 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
514 557
515 for (i = 0; i < adapter->num_tx_queues; i++) { 558 for (i = 0; i < adapter->num_tx_queues; i++) {
516 adapter->tx_ring[i].cpu = -1; 559 adapter->tx_ring[i]->cpu = -1;
517 ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]); 560 ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
518 } 561 }
519 for (i = 0; i < adapter->num_rx_queues; i++) { 562 for (i = 0; i < adapter->num_rx_queues; i++) {
520 adapter->rx_ring[i].cpu = -1; 563 adapter->rx_ring[i]->cpu = -1;
521 ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]); 564 ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
522 } 565 }
523} 566}
524 567
@@ -775,6 +818,12 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
775 return skb; 818 return skb;
776} 819}
777 820
821struct ixgbe_rsc_cb {
822 dma_addr_t dma;
823};
824
825#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
826
778static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, 827static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
779 struct ixgbe_ring *rx_ring, 828 struct ixgbe_ring *rx_ring,
780 int *work_done, int work_to_do) 829 int *work_done, int work_to_do)
@@ -806,6 +855,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
806 break; 855 break;
807 (*work_done)++; 856 (*work_done)++;
808 857
858 rmb(); /* read descriptor and rx_buffer_info after status DD */
809 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { 859 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
810 hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc)); 860 hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
811 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> 861 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
@@ -823,9 +873,21 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
823 rx_buffer_info->skb = NULL; 873 rx_buffer_info->skb = NULL;
824 874
825 if (rx_buffer_info->dma) { 875 if (rx_buffer_info->dma) {
826 pci_unmap_single(pdev, rx_buffer_info->dma, 876 if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
827 rx_ring->rx_buf_len, 877 (!(staterr & IXGBE_RXD_STAT_EOP)) &&
828 PCI_DMA_FROMDEVICE); 878 (!(skb->prev)))
879 /*
880 * When HWRSC is enabled, delay unmapping
881 * of the first packet. It carries the
882 * header information, HW may still
883 * access the header after the writeback.
884 * Only unmap it when EOP is reached
885 */
886 IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
887 else
888 pci_unmap_single(pdev, rx_buffer_info->dma,
889 rx_ring->rx_buf_len,
890 PCI_DMA_FROMDEVICE);
829 rx_buffer_info->dma = 0; 891 rx_buffer_info->dma = 0;
830 skb_put(skb, len); 892 skb_put(skb, len);
831 } 893 }
@@ -873,6 +935,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
873 if (skb->prev) 935 if (skb->prev)
874 skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count)); 936 skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
875 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 937 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
938 if (IXGBE_RSC_CB(skb)->dma)
939 pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma,
940 rx_ring->rx_buf_len,
941 PCI_DMA_FROMDEVICE);
876 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) 942 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
877 rx_ring->rsc_count += skb_shinfo(skb)->nr_frags; 943 rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
878 else 944 else
@@ -989,7 +1055,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
989 adapter->num_rx_queues); 1055 adapter->num_rx_queues);
990 1056
991 for (i = 0; i < q_vector->rxr_count; i++) { 1057 for (i = 0; i < q_vector->rxr_count; i++) {
992 j = adapter->rx_ring[r_idx].reg_idx; 1058 j = adapter->rx_ring[r_idx]->reg_idx;
993 ixgbe_set_ivar(adapter, 0, j, v_idx); 1059 ixgbe_set_ivar(adapter, 0, j, v_idx);
994 r_idx = find_next_bit(q_vector->rxr_idx, 1060 r_idx = find_next_bit(q_vector->rxr_idx,
995 adapter->num_rx_queues, 1061 adapter->num_rx_queues,
@@ -999,7 +1065,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
999 adapter->num_tx_queues); 1065 adapter->num_tx_queues);
1000 1066
1001 for (i = 0; i < q_vector->txr_count; i++) { 1067 for (i = 0; i < q_vector->txr_count; i++) {
1002 j = adapter->tx_ring[r_idx].reg_idx; 1068 j = adapter->tx_ring[r_idx]->reg_idx;
1003 ixgbe_set_ivar(adapter, 1, j, v_idx); 1069 ixgbe_set_ivar(adapter, 1, j, v_idx);
1004 r_idx = find_next_bit(q_vector->txr_idx, 1070 r_idx = find_next_bit(q_vector->txr_idx,
1005 adapter->num_tx_queues, 1071 adapter->num_tx_queues,
@@ -1025,7 +1091,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1025 1091
1026 /* set up to autoclear timer, and the vectors */ 1092 /* set up to autoclear timer, and the vectors */
1027 mask = IXGBE_EIMS_ENABLE_MASK; 1093 mask = IXGBE_EIMS_ENABLE_MASK;
1028 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); 1094 if (adapter->num_vfs)
1095 mask &= ~(IXGBE_EIMS_OTHER |
1096 IXGBE_EIMS_MAILBOX |
1097 IXGBE_EIMS_LSC);
1098 else
1099 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1029 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); 1100 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
1030} 1101}
1031 1102
@@ -1134,7 +1205,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1134 1205
1135 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 1206 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1136 for (i = 0; i < q_vector->txr_count; i++) { 1207 for (i = 0; i < q_vector->txr_count; i++) {
1137 tx_ring = &(adapter->tx_ring[r_idx]); 1208 tx_ring = adapter->tx_ring[r_idx];
1138 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 1209 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
1139 q_vector->tx_itr, 1210 q_vector->tx_itr,
1140 tx_ring->total_packets, 1211 tx_ring->total_packets,
@@ -1149,7 +1220,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1149 1220
1150 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1221 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1151 for (i = 0; i < q_vector->rxr_count; i++) { 1222 for (i = 0; i < q_vector->rxr_count; i++) {
1152 rx_ring = &(adapter->rx_ring[r_idx]); 1223 rx_ring = adapter->rx_ring[r_idx];
1153 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 1224 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
1154 q_vector->rx_itr, 1225 q_vector->rx_itr,
1155 rx_ring->total_packets, 1226 rx_ring->total_packets,
@@ -1254,6 +1325,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1254 if (eicr & IXGBE_EICR_LSC) 1325 if (eicr & IXGBE_EICR_LSC)
1255 ixgbe_check_lsc(adapter); 1326 ixgbe_check_lsc(adapter);
1256 1327
1328 if (eicr & IXGBE_EICR_MAILBOX)
1329 ixgbe_msg_task(adapter);
1330
1257 if (hw->mac.type == ixgbe_mac_82598EB) 1331 if (hw->mac.type == ixgbe_mac_82598EB)
1258 ixgbe_check_fan_failure(adapter, eicr); 1332 ixgbe_check_fan_failure(adapter, eicr);
1259 1333
@@ -1268,7 +1342,7 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1268 netif_tx_stop_all_queues(netdev); 1342 netif_tx_stop_all_queues(netdev);
1269 for (i = 0; i < adapter->num_tx_queues; i++) { 1343 for (i = 0; i < adapter->num_tx_queues; i++) {
1270 struct ixgbe_ring *tx_ring = 1344 struct ixgbe_ring *tx_ring =
1271 &adapter->tx_ring[i]; 1345 adapter->tx_ring[i];
1272 if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE, 1346 if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
1273 &tx_ring->reinit_state)) 1347 &tx_ring->reinit_state))
1274 schedule_work(&adapter->fdir_reinit_task); 1348 schedule_work(&adapter->fdir_reinit_task);
@@ -1327,7 +1401,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
1327 1401
1328 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 1402 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1329 for (i = 0; i < q_vector->txr_count; i++) { 1403 for (i = 0; i < q_vector->txr_count; i++) {
1330 tx_ring = &(adapter->tx_ring[r_idx]); 1404 tx_ring = adapter->tx_ring[r_idx];
1331 tx_ring->total_bytes = 0; 1405 tx_ring->total_bytes = 0;
1332 tx_ring->total_packets = 0; 1406 tx_ring->total_packets = 0;
1333 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 1407 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
@@ -1355,7 +1429,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1355 1429
1356 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1430 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1357 for (i = 0; i < q_vector->rxr_count; i++) { 1431 for (i = 0; i < q_vector->rxr_count; i++) {
1358 rx_ring = &(adapter->rx_ring[r_idx]); 1432 rx_ring = adapter->rx_ring[r_idx];
1359 rx_ring->total_bytes = 0; 1433 rx_ring->total_bytes = 0;
1360 rx_ring->total_packets = 0; 1434 rx_ring->total_packets = 0;
1361 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 1435 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
@@ -1385,7 +1459,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
1385 1459
1386 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 1460 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1387 for (i = 0; i < q_vector->txr_count; i++) { 1461 for (i = 0; i < q_vector->txr_count; i++) {
1388 ring = &(adapter->tx_ring[r_idx]); 1462 ring = adapter->tx_ring[r_idx];
1389 ring->total_bytes = 0; 1463 ring->total_bytes = 0;
1390 ring->total_packets = 0; 1464 ring->total_packets = 0;
1391 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 1465 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
@@ -1394,7 +1468,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
1394 1468
1395 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1469 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1396 for (i = 0; i < q_vector->rxr_count; i++) { 1470 for (i = 0; i < q_vector->rxr_count; i++) {
1397 ring = &(adapter->rx_ring[r_idx]); 1471 ring = adapter->rx_ring[r_idx];
1398 ring->total_bytes = 0; 1472 ring->total_bytes = 0;
1399 ring->total_packets = 0; 1473 ring->total_packets = 0;
1400 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 1474 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
@@ -1425,7 +1499,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1425 long r_idx; 1499 long r_idx;
1426 1500
1427 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1501 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1428 rx_ring = &(adapter->rx_ring[r_idx]); 1502 rx_ring = adapter->rx_ring[r_idx];
1429#ifdef CONFIG_IXGBE_DCA 1503#ifdef CONFIG_IXGBE_DCA
1430 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 1504 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1431 ixgbe_update_rx_dca(adapter, rx_ring); 1505 ixgbe_update_rx_dca(adapter, rx_ring);
@@ -1466,7 +1540,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
1466 1540
1467 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 1541 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1468 for (i = 0; i < q_vector->txr_count; i++) { 1542 for (i = 0; i < q_vector->txr_count; i++) {
1469 ring = &(adapter->tx_ring[r_idx]); 1543 ring = adapter->tx_ring[r_idx];
1470#ifdef CONFIG_IXGBE_DCA 1544#ifdef CONFIG_IXGBE_DCA
1471 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 1545 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1472 ixgbe_update_tx_dca(adapter, ring); 1546 ixgbe_update_tx_dca(adapter, ring);
@@ -1482,7 +1556,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
1482 budget = max(budget, 1); 1556 budget = max(budget, 1);
1483 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1557 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1484 for (i = 0; i < q_vector->rxr_count; i++) { 1558 for (i = 0; i < q_vector->rxr_count; i++) {
1485 ring = &(adapter->rx_ring[r_idx]); 1559 ring = adapter->rx_ring[r_idx];
1486#ifdef CONFIG_IXGBE_DCA 1560#ifdef CONFIG_IXGBE_DCA
1487 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 1561 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1488 ixgbe_update_rx_dca(adapter, ring); 1562 ixgbe_update_rx_dca(adapter, ring);
@@ -1493,7 +1567,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
1493 } 1567 }
1494 1568
1495 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1569 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1496 ring = &(adapter->rx_ring[r_idx]); 1570 ring = adapter->rx_ring[r_idx];
1497 /* If all Rx work done, exit the polling mode */ 1571 /* If all Rx work done, exit the polling mode */
1498 if (work_done < budget) { 1572 if (work_done < budget) {
1499 napi_complete(napi); 1573 napi_complete(napi);
@@ -1526,7 +1600,7 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
1526 long r_idx; 1600 long r_idx;
1527 1601
1528 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 1602 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1529 tx_ring = &(adapter->tx_ring[r_idx]); 1603 tx_ring = adapter->tx_ring[r_idx];
1530#ifdef CONFIG_IXGBE_DCA 1604#ifdef CONFIG_IXGBE_DCA
1531 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 1605 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1532 ixgbe_update_tx_dca(adapter, tx_ring); 1606 ixgbe_update_tx_dca(adapter, tx_ring);
@@ -1711,8 +1785,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
1711 struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; 1785 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
1712 u8 current_itr; 1786 u8 current_itr;
1713 u32 new_itr = q_vector->eitr; 1787 u32 new_itr = q_vector->eitr;
1714 struct ixgbe_ring *rx_ring = &adapter->rx_ring[0]; 1788 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
1715 struct ixgbe_ring *tx_ring = &adapter->tx_ring[0]; 1789 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
1716 1790
1717 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr, 1791 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
1718 q_vector->tx_itr, 1792 q_vector->tx_itr,
@@ -1768,6 +1842,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1768 mask |= IXGBE_EIMS_ECC; 1842 mask |= IXGBE_EIMS_ECC;
1769 mask |= IXGBE_EIMS_GPI_SDP1; 1843 mask |= IXGBE_EIMS_GPI_SDP1;
1770 mask |= IXGBE_EIMS_GPI_SDP2; 1844 mask |= IXGBE_EIMS_GPI_SDP2;
1845 if (adapter->num_vfs)
1846 mask |= IXGBE_EIMS_MAILBOX;
1771 } 1847 }
1772 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 1848 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
1773 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 1849 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -1776,6 +1852,11 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1776 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1852 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1777 ixgbe_irq_enable_queues(adapter, ~0); 1853 ixgbe_irq_enable_queues(adapter, ~0);
1778 IXGBE_WRITE_FLUSH(&adapter->hw); 1854 IXGBE_WRITE_FLUSH(&adapter->hw);
1855
1856 if (adapter->num_vfs > 32) {
1857 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
1858 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
1859 }
1779} 1860}
1780 1861
1781/** 1862/**
@@ -1817,10 +1898,10 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
1817 ixgbe_check_fan_failure(adapter, eicr); 1898 ixgbe_check_fan_failure(adapter, eicr);
1818 1899
1819 if (napi_schedule_prep(&(q_vector->napi))) { 1900 if (napi_schedule_prep(&(q_vector->napi))) {
1820 adapter->tx_ring[0].total_packets = 0; 1901 adapter->tx_ring[0]->total_packets = 0;
1821 adapter->tx_ring[0].total_bytes = 0; 1902 adapter->tx_ring[0]->total_bytes = 0;
1822 adapter->rx_ring[0].total_packets = 0; 1903 adapter->rx_ring[0]->total_packets = 0;
1823 adapter->rx_ring[0].total_bytes = 0; 1904 adapter->rx_ring[0]->total_bytes = 0;
1824 /* would disable interrupts here but EIAM disabled it */ 1905 /* would disable interrupts here but EIAM disabled it */
1825 __napi_schedule(&(q_vector->napi)); 1906 __napi_schedule(&(q_vector->napi));
1826 } 1907 }
@@ -1905,6 +1986,8 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
1905 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); 1986 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
1906 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); 1987 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
1907 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); 1988 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
1989 if (adapter->num_vfs > 32)
1990 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
1908 } 1991 }
1909 IXGBE_WRITE_FLUSH(&adapter->hw); 1992 IXGBE_WRITE_FLUSH(&adapter->hw);
1910 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 1993 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1950,7 +2033,7 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1950 2033
1951 /* Setup the HW Tx Head and Tail descriptor pointers */ 2034 /* Setup the HW Tx Head and Tail descriptor pointers */
1952 for (i = 0; i < adapter->num_tx_queues; i++) { 2035 for (i = 0; i < adapter->num_tx_queues; i++) {
1953 struct ixgbe_ring *ring = &adapter->tx_ring[i]; 2036 struct ixgbe_ring *ring = adapter->tx_ring[i];
1954 j = ring->reg_idx; 2037 j = ring->reg_idx;
1955 tdba = ring->dma; 2038 tdba = ring->dma;
1956 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc); 2039 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
@@ -1960,8 +2043,8 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1960 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen); 2043 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
1961 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); 2044 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
1962 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 2045 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
1963 adapter->tx_ring[i].head = IXGBE_TDH(j); 2046 adapter->tx_ring[i]->head = IXGBE_TDH(j);
1964 adapter->tx_ring[i].tail = IXGBE_TDT(j); 2047 adapter->tx_ring[i]->tail = IXGBE_TDT(j);
1965 /* 2048 /*
1966 * Disable Tx Head Writeback RO bit, since this hoses 2049 * Disable Tx Head Writeback RO bit, since this hoses
1967 * bookkeeping if things aren't delivered in order. 2050 * bookkeeping if things aren't delivered in order.
@@ -1989,18 +2072,32 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1989 2072
1990 if (hw->mac.type == ixgbe_mac_82599EB) { 2073 if (hw->mac.type == ixgbe_mac_82599EB) {
1991 u32 rttdcs; 2074 u32 rttdcs;
2075 u32 mask;
1992 2076
1993 /* disable the arbiter while setting MTQC */ 2077 /* disable the arbiter while setting MTQC */
1994 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 2078 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
1995 rttdcs |= IXGBE_RTTDCS_ARBDIS; 2079 rttdcs |= IXGBE_RTTDCS_ARBDIS;
1996 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 2080 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
1997 2081
1998 /* We enable 8 traffic classes, DCB only */ 2082 /* set transmit pool layout */
1999 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) 2083 mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
2000 IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA | 2084 switch (adapter->flags & mask) {
2001 IXGBE_MTQC_8TC_8TQ)); 2085
2002 else 2086 case (IXGBE_FLAG_SRIOV_ENABLED):
2087 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2088 (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
2089 break;
2090
2091 case (IXGBE_FLAG_DCB_ENABLED):
2092 /* We enable 8 traffic classes, DCB only */
2093 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2094 (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
2095 break;
2096
2097 default:
2003 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); 2098 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2099 break;
2100 }
2004 2101
2005 /* re-eable the arbiter */ 2102 /* re-eable the arbiter */
2006 rttdcs &= ~IXGBE_RTTDCS_ARBDIS; 2103 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
@@ -2059,12 +2156,16 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2059#ifdef CONFIG_IXGBE_DCB 2156#ifdef CONFIG_IXGBE_DCB
2060 | IXGBE_FLAG_DCB_ENABLED 2157 | IXGBE_FLAG_DCB_ENABLED
2061#endif 2158#endif
2159 | IXGBE_FLAG_SRIOV_ENABLED
2062 ); 2160 );
2063 2161
2064 switch (mask) { 2162 switch (mask) {
2065 case (IXGBE_FLAG_RSS_ENABLED): 2163 case (IXGBE_FLAG_RSS_ENABLED):
2066 mrqc = IXGBE_MRQC_RSSEN; 2164 mrqc = IXGBE_MRQC_RSSEN;
2067 break; 2165 break;
2166 case (IXGBE_FLAG_SRIOV_ENABLED):
2167 mrqc = IXGBE_MRQC_VMDQEN;
2168 break;
2068#ifdef CONFIG_IXGBE_DCB 2169#ifdef CONFIG_IXGBE_DCB
2069 case (IXGBE_FLAG_DCB_ENABLED): 2170 case (IXGBE_FLAG_DCB_ENABLED):
2070 mrqc = IXGBE_MRQC_RT8TCEN; 2171 mrqc = IXGBE_MRQC_RT8TCEN;
@@ -2090,7 +2191,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
2090 u32 rscctrl; 2191 u32 rscctrl;
2091 int rx_buf_len; 2192 int rx_buf_len;
2092 2193
2093 rx_ring = &adapter->rx_ring[index]; 2194 rx_ring = adapter->rx_ring[index];
2094 j = rx_ring->reg_idx; 2195 j = rx_ring->reg_idx;
2095 rx_buf_len = rx_ring->rx_buf_len; 2196 rx_buf_len = rx_ring->rx_buf_len;
2096 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j)); 2197 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
@@ -2145,7 +2246,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2145 int rx_buf_len; 2246 int rx_buf_len;
2146 2247
2147 /* Decide whether to use packet split mode or not */ 2248 /* Decide whether to use packet split mode or not */
2148 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; 2249 /* Do not use packet split if we're in SR-IOV Mode */
2250 if (!adapter->num_vfs)
2251 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
2149 2252
2150 /* Set the RX buffer length according to the mode */ 2253 /* Set the RX buffer length according to the mode */
2151 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 2254 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -2157,7 +2260,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2157 IXGBE_PSRTYPE_IPV4HDR | 2260 IXGBE_PSRTYPE_IPV4HDR |
2158 IXGBE_PSRTYPE_IPV6HDR | 2261 IXGBE_PSRTYPE_IPV6HDR |
2159 IXGBE_PSRTYPE_L2HDR; 2262 IXGBE_PSRTYPE_L2HDR;
2160 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 2263 IXGBE_WRITE_REG(hw,
2264 IXGBE_PSRTYPE(adapter->num_vfs),
2265 psrtype);
2161 } 2266 }
2162 } else { 2267 } else {
2163 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && 2268 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
@@ -2184,7 +2289,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2184#endif 2289#endif
2185 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 2290 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2186 2291
2187 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); 2292 rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc);
2188 /* disable receives while setting up the descriptors */ 2293 /* disable receives while setting up the descriptors */
2189 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 2294 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2190 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); 2295 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
@@ -2194,7 +2299,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2194 * the Base and Length of the Rx Descriptor Ring 2299 * the Base and Length of the Rx Descriptor Ring
2195 */ 2300 */
2196 for (i = 0; i < adapter->num_rx_queues; i++) { 2301 for (i = 0; i < adapter->num_rx_queues; i++) {
2197 rx_ring = &adapter->rx_ring[i]; 2302 rx_ring = adapter->rx_ring[i];
2198 rdba = rx_ring->dma; 2303 rdba = rx_ring->dma;
2199 j = rx_ring->reg_idx; 2304 j = rx_ring->reg_idx;
2200 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32))); 2305 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
@@ -2243,6 +2348,30 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2243 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 2348 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
2244 } 2349 }
2245 2350
2351 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2352 u32 vt_reg_bits;
2353 u32 reg_offset, vf_shift;
2354 u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2355 vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
2356 | IXGBE_VT_CTL_REPLEN;
2357 vt_reg_bits |= (adapter->num_vfs <<
2358 IXGBE_VT_CTL_POOL_SHIFT);
2359 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
2360 IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
2361
2362 vf_shift = adapter->num_vfs % 32;
2363 reg_offset = adapter->num_vfs / 32;
2364 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
2365 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
2366 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
2367 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
2368 /* Enable only the PF's pool for Tx/Rx */
2369 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
2370 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
2371 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2372 ixgbe_set_vmolr(hw, adapter->num_vfs);
2373 }
2374
2246 /* Program MRQC for the distribution of queues */ 2375 /* Program MRQC for the distribution of queues */
2247 mrqc = ixgbe_setup_mrqc(adapter); 2376 mrqc = ixgbe_setup_mrqc(adapter);
2248 2377
@@ -2274,6 +2403,20 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2274 } 2403 }
2275 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2404 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2276 2405
2406 if (adapter->num_vfs) {
2407 u32 reg;
2408
2409 /* Map PF MAC address in RAR Entry 0 to first pool
2410 * following VFs */
2411 hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
2412
2413 /* Set up VF register offsets for selected VT Mode, i.e.
2414 * 64 VFs for SR-IOV */
2415 reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
2416 reg |= IXGBE_GCR_EXT_SRIOV;
2417 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
2418 }
2419
2277 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 2420 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2278 2421
2279 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED || 2422 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
@@ -2312,15 +2455,17 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2312{ 2455{
2313 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2456 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2314 struct ixgbe_hw *hw = &adapter->hw; 2457 struct ixgbe_hw *hw = &adapter->hw;
2458 int pool_ndx = adapter->num_vfs;
2315 2459
2316 /* add VID to filter table */ 2460 /* add VID to filter table */
2317 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true); 2461 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
2318} 2462}
2319 2463
2320static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 2464static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2321{ 2465{
2322 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2466 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2323 struct ixgbe_hw *hw = &adapter->hw; 2467 struct ixgbe_hw *hw = &adapter->hw;
2468 int pool_ndx = adapter->num_vfs;
2324 2469
2325 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2470 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2326 ixgbe_irq_disable(adapter); 2471 ixgbe_irq_disable(adapter);
@@ -2331,7 +2476,7 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2331 ixgbe_irq_enable(adapter); 2476 ixgbe_irq_enable(adapter);
2332 2477
2333 /* remove VID from filter table */ 2478 /* remove VID from filter table */
2334 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false); 2479 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
2335} 2480}
2336 2481
2337static void ixgbe_vlan_rx_register(struct net_device *netdev, 2482static void ixgbe_vlan_rx_register(struct net_device *netdev,
@@ -2361,7 +2506,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
2361 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 2506 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
2362 for (i = 0; i < adapter->num_rx_queues; i++) { 2507 for (i = 0; i < adapter->num_rx_queues; i++) {
2363 u32 ctrl; 2508 u32 ctrl;
2364 j = adapter->rx_ring[i].reg_idx; 2509 j = adapter->rx_ring[i]->reg_idx;
2365 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j)); 2510 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
2366 ctrl |= IXGBE_RXDCTL_VME; 2511 ctrl |= IXGBE_RXDCTL_VME;
2367 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl); 2512 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
@@ -2414,7 +2559,7 @@ static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
2414 * responsible for configuring the hardware for proper unicast, multicast and 2559 * responsible for configuring the hardware for proper unicast, multicast and
2415 * promiscuous mode. 2560 * promiscuous mode.
2416 **/ 2561 **/
2417static void ixgbe_set_rx_mode(struct net_device *netdev) 2562void ixgbe_set_rx_mode(struct net_device *netdev)
2418{ 2563{
2419 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2564 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2420 struct ixgbe_hw *hw = &adapter->hw; 2565 struct ixgbe_hw *hw = &adapter->hw;
@@ -2446,14 +2591,16 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
2446 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 2591 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2447 2592
2448 /* reprogram secondary unicast list */ 2593 /* reprogram secondary unicast list */
2449 hw->mac.ops.update_uc_addr_list(hw, &netdev->uc.list); 2594 hw->mac.ops.update_uc_addr_list(hw, netdev);
2450 2595
2451 /* reprogram multicast list */ 2596 /* reprogram multicast list */
2452 addr_count = netdev->mc_count; 2597 addr_count = netdev_mc_count(netdev);
2453 if (addr_count) 2598 if (addr_count)
2454 addr_list = netdev->mc_list->dmi_addr; 2599 addr_list = netdev->mc_list->dmi_addr;
2455 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, 2600 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
2456 ixgbe_addr_list_itr); 2601 ixgbe_addr_list_itr);
2602 if (adapter->num_vfs)
2603 ixgbe_restore_vf_multicasts(adapter);
2457} 2604}
2458 2605
2459static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) 2606static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -2522,7 +2669,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
2522 ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg); 2669 ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
2523 2670
2524 for (i = 0; i < adapter->num_tx_queues; i++) { 2671 for (i = 0; i < adapter->num_tx_queues; i++) {
2525 j = adapter->tx_ring[i].reg_idx; 2672 j = adapter->tx_ring[i]->reg_idx;
2526 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 2673 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2527 /* PThresh workaround for Tx hang with DFP enabled. */ 2674 /* PThresh workaround for Tx hang with DFP enabled. */
2528 txdctl |= 32; 2675 txdctl |= 32;
@@ -2539,7 +2686,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
2539 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; 2686 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2540 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 2687 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2541 for (i = 0; i < adapter->num_rx_queues; i++) { 2688 for (i = 0; i < adapter->num_rx_queues; i++) {
2542 j = adapter->rx_ring[i].reg_idx; 2689 j = adapter->rx_ring[i]->reg_idx;
2543 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 2690 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2544 vlnctrl |= IXGBE_RXDCTL_VME; 2691 vlnctrl |= IXGBE_RXDCTL_VME;
2545 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); 2692 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@ -2579,7 +2726,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
2579#endif /* IXGBE_FCOE */ 2726#endif /* IXGBE_FCOE */
2580 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { 2727 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2581 for (i = 0; i < adapter->num_tx_queues; i++) 2728 for (i = 0; i < adapter->num_tx_queues; i++)
2582 adapter->tx_ring[i].atr_sample_rate = 2729 adapter->tx_ring[i]->atr_sample_rate =
2583 adapter->atr_sample_rate; 2730 adapter->atr_sample_rate;
2584 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc); 2731 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
2585 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { 2732 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
@@ -2589,8 +2736,8 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
2589 ixgbe_configure_tx(adapter); 2736 ixgbe_configure_tx(adapter);
2590 ixgbe_configure_rx(adapter); 2737 ixgbe_configure_rx(adapter);
2591 for (i = 0; i < adapter->num_rx_queues; i++) 2738 for (i = 0; i < adapter->num_rx_queues; i++)
2592 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i], 2739 ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i],
2593 (adapter->rx_ring[i].count - 1)); 2740 (adapter->rx_ring[i]->count - 1));
2594} 2741}
2595 2742
2596static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) 2743static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
@@ -2673,7 +2820,7 @@ link_cfg_out:
2673static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, 2820static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2674 int rxr) 2821 int rxr)
2675{ 2822{
2676 int j = adapter->rx_ring[rxr].reg_idx; 2823 int j = adapter->rx_ring[rxr]->reg_idx;
2677 int k; 2824 int k;
2678 2825
2679 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) { 2826 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
@@ -2687,8 +2834,8 @@ static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2687 DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d " 2834 DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
2688 "not set within the polling period\n", rxr); 2835 "not set within the polling period\n", rxr);
2689 } 2836 }
2690 ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr], 2837 ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
2691 (adapter->rx_ring[rxr].count - 1)); 2838 (adapter->rx_ring[rxr]->count - 1));
2692} 2839}
2693 2840
2694static int ixgbe_up_complete(struct ixgbe_adapter *adapter) 2841static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
@@ -2702,6 +2849,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2702 u32 txdctl, rxdctl, mhadd; 2849 u32 txdctl, rxdctl, mhadd;
2703 u32 dmatxctl; 2850 u32 dmatxctl;
2704 u32 gpie; 2851 u32 gpie;
2852 u32 ctrl_ext;
2705 2853
2706 ixgbe_get_hw_control(adapter); 2854 ixgbe_get_hw_control(adapter);
2707 2855
@@ -2714,6 +2862,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2714 /* MSI only */ 2862 /* MSI only */
2715 gpie = 0; 2863 gpie = 0;
2716 } 2864 }
2865 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2866 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
2867 gpie |= IXGBE_GPIE_VTMODE_64;
2868 }
2717 /* XXX: to interrupt immediately for EICS writes, enable this */ 2869 /* XXX: to interrupt immediately for EICS writes, enable this */
2718 /* gpie |= IXGBE_GPIE_EIMEN; */ 2870 /* gpie |= IXGBE_GPIE_EIMEN; */
2719 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2871 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -2770,7 +2922,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2770 } 2922 }
2771 2923
2772 for (i = 0; i < adapter->num_tx_queues; i++) { 2924 for (i = 0; i < adapter->num_tx_queues; i++) {
2773 j = adapter->tx_ring[i].reg_idx; 2925 j = adapter->tx_ring[i]->reg_idx;
2774 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 2926 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2775 /* enable WTHRESH=8 descriptors, to encourage burst writeback */ 2927 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
2776 txdctl |= (8 << 16); 2928 txdctl |= (8 << 16);
@@ -2784,14 +2936,26 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2784 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); 2936 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2785 } 2937 }
2786 for (i = 0; i < adapter->num_tx_queues; i++) { 2938 for (i = 0; i < adapter->num_tx_queues; i++) {
2787 j = adapter->tx_ring[i].reg_idx; 2939 j = adapter->tx_ring[i]->reg_idx;
2788 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 2940 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2789 txdctl |= IXGBE_TXDCTL_ENABLE; 2941 txdctl |= IXGBE_TXDCTL_ENABLE;
2790 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); 2942 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2943 if (hw->mac.type == ixgbe_mac_82599EB) {
2944 int wait_loop = 10;
2945 /* poll for Tx Enable ready */
2946 do {
2947 msleep(1);
2948 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2949 } while (--wait_loop &&
2950 !(txdctl & IXGBE_TXDCTL_ENABLE));
2951 if (!wait_loop)
2952 DPRINTK(DRV, ERR, "Could not enable "
2953 "Tx Queue %d\n", j);
2954 }
2791 } 2955 }
2792 2956
2793 for (i = 0; i < num_rx_rings; i++) { 2957 for (i = 0; i < num_rx_rings; i++) {
2794 j = adapter->rx_ring[i].reg_idx; 2958 j = adapter->rx_ring[i]->reg_idx;
2795 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 2959 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2796 /* enable PTHRESH=32 descriptors (half the internal cache) 2960 /* enable PTHRESH=32 descriptors (half the internal cache)
2797 * and HTHRESH=0 descriptors (to minimize latency on fetch), 2961 * and HTHRESH=0 descriptors (to minimize latency on fetch),
@@ -2865,7 +3029,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2865 3029
2866 for (i = 0; i < adapter->num_tx_queues; i++) 3030 for (i = 0; i < adapter->num_tx_queues; i++)
2867 set_bit(__IXGBE_FDIR_INIT_DONE, 3031 set_bit(__IXGBE_FDIR_INIT_DONE,
2868 &(adapter->tx_ring[i].reinit_state)); 3032 &(adapter->tx_ring[i]->reinit_state));
2869 3033
2870 /* enable transmits */ 3034 /* enable transmits */
2871 netif_tx_start_all_queues(netdev); 3035 netif_tx_start_all_queues(netdev);
@@ -2875,6 +3039,12 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2875 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 3039 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2876 adapter->link_check_timeout = jiffies; 3040 adapter->link_check_timeout = jiffies;
2877 mod_timer(&adapter->watchdog_timer, jiffies); 3041 mod_timer(&adapter->watchdog_timer, jiffies);
3042
3043 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
3044 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3045 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3046 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3047
2878 return 0; 3048 return 0;
2879} 3049}
2880 3050
@@ -2923,7 +3093,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
2923 } 3093 }
2924 3094
2925 /* reprogram the RAR[0] in case user changed it. */ 3095 /* reprogram the RAR[0] in case user changed it. */
2926 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 3096 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
3097 IXGBE_RAH_AV);
2927} 3098}
2928 3099
2929/** 3100/**
@@ -2955,6 +3126,10 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
2955 rx_buffer_info->skb = NULL; 3126 rx_buffer_info->skb = NULL;
2956 do { 3127 do {
2957 struct sk_buff *this = skb; 3128 struct sk_buff *this = skb;
3129 if (IXGBE_RSC_CB(this)->dma)
3130 pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma,
3131 rx_ring->rx_buf_len,
3132 PCI_DMA_FROMDEVICE);
2958 skb = skb->prev; 3133 skb = skb->prev;
2959 dev_kfree_skb(this); 3134 dev_kfree_skb(this);
2960 } while (skb); 3135 } while (skb);
@@ -3029,7 +3204,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
3029 int i; 3204 int i;
3030 3205
3031 for (i = 0; i < adapter->num_rx_queues; i++) 3206 for (i = 0; i < adapter->num_rx_queues; i++)
3032 ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]); 3207 ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
3033} 3208}
3034 3209
3035/** 3210/**
@@ -3041,7 +3216,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
3041 int i; 3216 int i;
3042 3217
3043 for (i = 0; i < adapter->num_tx_queues; i++) 3218 for (i = 0; i < adapter->num_tx_queues; i++)
3044 ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]); 3219 ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
3045} 3220}
3046 3221
3047void ixgbe_down(struct ixgbe_adapter *adapter) 3222void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -3055,6 +3230,17 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3055 /* signal that we are down to the interrupt handler */ 3230 /* signal that we are down to the interrupt handler */
3056 set_bit(__IXGBE_DOWN, &adapter->state); 3231 set_bit(__IXGBE_DOWN, &adapter->state);
3057 3232
3233 /* disable receive for all VFs and wait one second */
3234 if (adapter->num_vfs) {
3235 for (i = 0 ; i < adapter->num_vfs; i++)
3236 adapter->vfinfo[i].clear_to_send = 0;
3237
3238 /* ping all the active vfs to let them know we are going down */
3239 ixgbe_ping_all_vfs(adapter);
3240 /* Disable all VFTE/VFRE TX/RX */
3241 ixgbe_disable_tx_rx(adapter);
3242 }
3243
3058 /* disable receives */ 3244 /* disable receives */
3059 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 3245 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3060 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); 3246 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
@@ -3081,7 +3267,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3081 3267
3082 /* disable transmits in the hardware now that interrupts are off */ 3268 /* disable transmits in the hardware now that interrupts are off */
3083 for (i = 0; i < adapter->num_tx_queues; i++) { 3269 for (i = 0; i < adapter->num_tx_queues; i++) {
3084 j = adapter->tx_ring[i].reg_idx; 3270 j = adapter->tx_ring[i]->reg_idx;
3085 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 3271 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3086 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), 3272 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
3087 (txdctl & ~IXGBE_TXDCTL_ENABLE)); 3273 (txdctl & ~IXGBE_TXDCTL_ENABLE));
@@ -3094,6 +3280,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3094 3280
3095 netif_carrier_off(netdev); 3281 netif_carrier_off(netdev);
3096 3282
3283 /* clear n-tuple filters that are cached */
3284 ethtool_ntuple_flush(netdev);
3285
3097 if (!pci_channel_offline(adapter->pdev)) 3286 if (!pci_channel_offline(adapter->pdev))
3098 ixgbe_reset(adapter); 3287 ixgbe_reset(adapter);
3099 ixgbe_clean_all_tx_rings(adapter); 3288 ixgbe_clean_all_tx_rings(adapter);
@@ -3121,13 +3310,13 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
3121 3310
3122#ifdef CONFIG_IXGBE_DCA 3311#ifdef CONFIG_IXGBE_DCA
3123 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 3312 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
3124 ixgbe_update_tx_dca(adapter, adapter->tx_ring); 3313 ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
3125 ixgbe_update_rx_dca(adapter, adapter->rx_ring); 3314 ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
3126 } 3315 }
3127#endif 3316#endif
3128 3317
3129 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring); 3318 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
3130 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget); 3319 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
3131 3320
3132 if (!tx_clean_complete) 3321 if (!tx_clean_complete)
3133 work_done = budget; 3322 work_done = budget;
@@ -3291,6 +3480,19 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3291} 3480}
3292 3481
3293#endif /* IXGBE_FCOE */ 3482#endif /* IXGBE_FCOE */
3483/**
3484 * ixgbe_set_sriov_queues: Allocate queues for IOV use
3485 * @adapter: board private structure to initialize
3486 *
3487 * IOV doesn't actually use anything, so just NAK the
3488 * request for now and let the other queue routines
3489 * figure out what to do.
3490 */
3491static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
3492{
3493 return false;
3494}
3495
3294/* 3496/*
3295 * ixgbe_set_num_queues: Allocate queues for device, feature dependant 3497 * ixgbe_set_num_queues: Allocate queues for device, feature dependant
3296 * @adapter: board private structure to initialize 3498 * @adapter: board private structure to initialize
@@ -3304,6 +3506,15 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3304 **/ 3506 **/
3305static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) 3507static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
3306{ 3508{
3509 /* Start with base case */
3510 adapter->num_rx_queues = 1;
3511 adapter->num_tx_queues = 1;
3512 adapter->num_rx_pools = adapter->num_rx_queues;
3513 adapter->num_rx_queues_per_pool = 1;
3514
3515 if (ixgbe_set_sriov_queues(adapter))
3516 return;
3517
3307#ifdef IXGBE_FCOE 3518#ifdef IXGBE_FCOE
3308 if (ixgbe_set_fcoe_queues(adapter)) 3519 if (ixgbe_set_fcoe_queues(adapter))
3309 goto done; 3520 goto done;
@@ -3393,9 +3604,9 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
3393 3604
3394 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 3605 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3395 for (i = 0; i < adapter->num_rx_queues; i++) 3606 for (i = 0; i < adapter->num_rx_queues; i++)
3396 adapter->rx_ring[i].reg_idx = i; 3607 adapter->rx_ring[i]->reg_idx = i;
3397 for (i = 0; i < adapter->num_tx_queues; i++) 3608 for (i = 0; i < adapter->num_tx_queues; i++)
3398 adapter->tx_ring[i].reg_idx = i; 3609 adapter->tx_ring[i]->reg_idx = i;
3399 ret = true; 3610 ret = true;
3400 } else { 3611 } else {
3401 ret = false; 3612 ret = false;
@@ -3422,8 +3633,8 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
3422 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 3633 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3423 /* the number of queues is assumed to be symmetric */ 3634 /* the number of queues is assumed to be symmetric */
3424 for (i = 0; i < dcb_i; i++) { 3635 for (i = 0; i < dcb_i; i++) {
3425 adapter->rx_ring[i].reg_idx = i << 3; 3636 adapter->rx_ring[i]->reg_idx = i << 3;
3426 adapter->tx_ring[i].reg_idx = i << 2; 3637 adapter->tx_ring[i]->reg_idx = i << 2;
3427 } 3638 }
3428 ret = true; 3639 ret = true;
3429 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 3640 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
@@ -3441,18 +3652,18 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
3441 * Rx TC0-TC7 are offset by 16 queues each 3652 * Rx TC0-TC7 are offset by 16 queues each
3442 */ 3653 */
3443 for (i = 0; i < 3; i++) { 3654 for (i = 0; i < 3; i++) {
3444 adapter->tx_ring[i].reg_idx = i << 5; 3655 adapter->tx_ring[i]->reg_idx = i << 5;
3445 adapter->rx_ring[i].reg_idx = i << 4; 3656 adapter->rx_ring[i]->reg_idx = i << 4;
3446 } 3657 }
3447 for ( ; i < 5; i++) { 3658 for ( ; i < 5; i++) {
3448 adapter->tx_ring[i].reg_idx = 3659 adapter->tx_ring[i]->reg_idx =
3449 ((i + 2) << 4); 3660 ((i + 2) << 4);
3450 adapter->rx_ring[i].reg_idx = i << 4; 3661 adapter->rx_ring[i]->reg_idx = i << 4;
3451 } 3662 }
3452 for ( ; i < dcb_i; i++) { 3663 for ( ; i < dcb_i; i++) {
3453 adapter->tx_ring[i].reg_idx = 3664 adapter->tx_ring[i]->reg_idx =
3454 ((i + 8) << 3); 3665 ((i + 8) << 3);
3455 adapter->rx_ring[i].reg_idx = i << 4; 3666 adapter->rx_ring[i]->reg_idx = i << 4;
3456 } 3667 }
3457 3668
3458 ret = true; 3669 ret = true;
@@ -3465,12 +3676,12 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
3465 * 3676 *
3466 * Rx TC0-TC3 are offset by 32 queues each 3677 * Rx TC0-TC3 are offset by 32 queues each
3467 */ 3678 */
3468 adapter->tx_ring[0].reg_idx = 0; 3679 adapter->tx_ring[0]->reg_idx = 0;
3469 adapter->tx_ring[1].reg_idx = 64; 3680 adapter->tx_ring[1]->reg_idx = 64;
3470 adapter->tx_ring[2].reg_idx = 96; 3681 adapter->tx_ring[2]->reg_idx = 96;
3471 adapter->tx_ring[3].reg_idx = 112; 3682 adapter->tx_ring[3]->reg_idx = 112;
3472 for (i = 0 ; i < dcb_i; i++) 3683 for (i = 0 ; i < dcb_i; i++)
3473 adapter->rx_ring[i].reg_idx = i << 5; 3684 adapter->rx_ring[i]->reg_idx = i << 5;
3474 3685
3475 ret = true; 3686 ret = true;
3476 } else { 3687 } else {
@@ -3503,9 +3714,9 @@ static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
3503 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || 3714 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3504 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) { 3715 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
3505 for (i = 0; i < adapter->num_rx_queues; i++) 3716 for (i = 0; i < adapter->num_rx_queues; i++)
3506 adapter->rx_ring[i].reg_idx = i; 3717 adapter->rx_ring[i]->reg_idx = i;
3507 for (i = 0; i < adapter->num_tx_queues; i++) 3718 for (i = 0; i < adapter->num_tx_queues; i++)
3508 adapter->tx_ring[i].reg_idx = i; 3719 adapter->tx_ring[i]->reg_idx = i;
3509 ret = true; 3720 ret = true;
3510 } 3721 }
3511 3722
@@ -3533,8 +3744,8 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
3533 3744
3534 ixgbe_cache_ring_dcb(adapter); 3745 ixgbe_cache_ring_dcb(adapter);
3535 /* find out queues in TC for FCoE */ 3746 /* find out queues in TC for FCoE */
3536 fcoe_rx_i = adapter->rx_ring[fcoe->tc].reg_idx + 1; 3747 fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
3537 fcoe_tx_i = adapter->tx_ring[fcoe->tc].reg_idx + 1; 3748 fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
3538 /* 3749 /*
3539 * In 82599, the number of Tx queues for each traffic 3750 * In 82599, the number of Tx queues for each traffic
3540 * class for both 8-TC and 4-TC modes are: 3751 * class for both 8-TC and 4-TC modes are:
@@ -3565,8 +3776,8 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
3565 fcoe_tx_i = f->mask; 3776 fcoe_tx_i = f->mask;
3566 } 3777 }
3567 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { 3778 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
3568 adapter->rx_ring[f->mask + i].reg_idx = fcoe_rx_i; 3779 adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
3569 adapter->tx_ring[f->mask + i].reg_idx = fcoe_tx_i; 3780 adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
3570 } 3781 }
3571 ret = true; 3782 ret = true;
3572 } 3783 }
@@ -3575,6 +3786,24 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
3575 3786
3576#endif /* IXGBE_FCOE */ 3787#endif /* IXGBE_FCOE */
3577/** 3788/**
3789 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
3790 * @adapter: board private structure to initialize
3791 *
3792 * SR-IOV doesn't use any descriptor rings but changes the default if
3793 * no other mapping is used.
3794 *
3795 */
3796static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
3797{
3798 adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
3799 adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
3800 if (adapter->num_vfs)
3801 return true;
3802 else
3803 return false;
3804}
3805
3806/**
3578 * ixgbe_cache_ring_register - Descriptor ring to register mapping 3807 * ixgbe_cache_ring_register - Descriptor ring to register mapping
3579 * @adapter: board private structure to initialize 3808 * @adapter: board private structure to initialize
3580 * 3809 *
@@ -3588,8 +3817,11 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
3588static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) 3817static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
3589{ 3818{
3590 /* start with default case */ 3819 /* start with default case */
3591 adapter->rx_ring[0].reg_idx = 0; 3820 adapter->rx_ring[0]->reg_idx = 0;
3592 adapter->tx_ring[0].reg_idx = 0; 3821 adapter->tx_ring[0]->reg_idx = 0;
3822
3823 if (ixgbe_cache_ring_sriov(adapter))
3824 return;
3593 3825
3594#ifdef IXGBE_FCOE 3826#ifdef IXGBE_FCOE
3595 if (ixgbe_cache_ring_fcoe(adapter)) 3827 if (ixgbe_cache_ring_fcoe(adapter))
@@ -3619,33 +3851,63 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
3619static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) 3851static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
3620{ 3852{
3621 int i; 3853 int i;
3622 3854 int orig_node = adapter->node;
3623 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
3624 sizeof(struct ixgbe_ring), GFP_KERNEL);
3625 if (!adapter->tx_ring)
3626 goto err_tx_ring_allocation;
3627
3628 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
3629 sizeof(struct ixgbe_ring), GFP_KERNEL);
3630 if (!adapter->rx_ring)
3631 goto err_rx_ring_allocation;
3632 3855
3633 for (i = 0; i < adapter->num_tx_queues; i++) { 3856 for (i = 0; i < adapter->num_tx_queues; i++) {
3634 adapter->tx_ring[i].count = adapter->tx_ring_count; 3857 struct ixgbe_ring *ring = adapter->tx_ring[i];
3635 adapter->tx_ring[i].queue_index = i; 3858 if (orig_node == -1) {
3859 int cur_node = next_online_node(adapter->node);
3860 if (cur_node == MAX_NUMNODES)
3861 cur_node = first_online_node;
3862 adapter->node = cur_node;
3863 }
3864 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
3865 adapter->node);
3866 if (!ring)
3867 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
3868 if (!ring)
3869 goto err_tx_ring_allocation;
3870 ring->count = adapter->tx_ring_count;
3871 ring->queue_index = i;
3872 ring->numa_node = adapter->node;
3873
3874 adapter->tx_ring[i] = ring;
3636 } 3875 }
3637 3876
3877 /* Restore the adapter's original node */
3878 adapter->node = orig_node;
3879
3638 for (i = 0; i < adapter->num_rx_queues; i++) { 3880 for (i = 0; i < adapter->num_rx_queues; i++) {
3639 adapter->rx_ring[i].count = adapter->rx_ring_count; 3881 struct ixgbe_ring *ring = adapter->rx_ring[i];
3640 adapter->rx_ring[i].queue_index = i; 3882 if (orig_node == -1) {
3883 int cur_node = next_online_node(adapter->node);
3884 if (cur_node == MAX_NUMNODES)
3885 cur_node = first_online_node;
3886 adapter->node = cur_node;
3887 }
3888 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
3889 adapter->node);
3890 if (!ring)
3891 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
3892 if (!ring)
3893 goto err_rx_ring_allocation;
3894 ring->count = adapter->rx_ring_count;
3895 ring->queue_index = i;
3896 ring->numa_node = adapter->node;
3897
3898 adapter->rx_ring[i] = ring;
3641 } 3899 }
3642 3900
3901 /* Restore the adapter's original node */
3902 adapter->node = orig_node;
3903
3643 ixgbe_cache_ring_register(adapter); 3904 ixgbe_cache_ring_register(adapter);
3644 3905
3645 return 0; 3906 return 0;
3646 3907
3647err_rx_ring_allocation: 3908err_rx_ring_allocation:
3648 kfree(adapter->tx_ring); 3909 for (i = 0; i < adapter->num_tx_queues; i++)
3910 kfree(adapter->tx_ring[i]);
3649err_tx_ring_allocation: 3911err_tx_ring_allocation:
3650 return -ENOMEM; 3912 return -ENOMEM;
3651} 3913}
@@ -3700,6 +3962,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
3700 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 3962 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
3701 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 3963 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
3702 adapter->atr_sample_rate = 0; 3964 adapter->atr_sample_rate = 0;
3965 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3966 ixgbe_disable_sriov(adapter);
3967
3703 ixgbe_set_num_queues(adapter); 3968 ixgbe_set_num_queues(adapter);
3704 3969
3705 err = pci_enable_msi(adapter->pdev); 3970 err = pci_enable_msi(adapter->pdev);
@@ -3741,7 +4006,11 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
3741 } 4006 }
3742 4007
3743 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 4008 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
3744 q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL); 4009 q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
4010 GFP_KERNEL, adapter->node);
4011 if (!q_vector)
4012 q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
4013 GFP_KERNEL);
3745 if (!q_vector) 4014 if (!q_vector)
3746 goto err_out; 4015 goto err_out;
3747 q_vector->adapter = adapter; 4016 q_vector->adapter = adapter;
@@ -3868,10 +4137,16 @@ err_set_interrupt:
3868 **/ 4137 **/
3869void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) 4138void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
3870{ 4139{
3871 kfree(adapter->tx_ring); 4140 int i;
3872 kfree(adapter->rx_ring); 4141
3873 adapter->tx_ring = NULL; 4142 for (i = 0; i < adapter->num_tx_queues; i++) {
3874 adapter->rx_ring = NULL; 4143 kfree(adapter->tx_ring[i]);
4144 adapter->tx_ring[i] = NULL;
4145 }
4146 for (i = 0; i < adapter->num_rx_queues; i++) {
4147 kfree(adapter->rx_ring[i]);
4148 adapter->rx_ring[i] = NULL;
4149 }
3875 4150
3876 ixgbe_free_q_vectors(adapter); 4151 ixgbe_free_q_vectors(adapter);
3877 ixgbe_reset_interrupt_capability(adapter); 4152 ixgbe_reset_interrupt_capability(adapter);
@@ -3942,6 +4217,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
3942{ 4217{
3943 struct ixgbe_hw *hw = &adapter->hw; 4218 struct ixgbe_hw *hw = &adapter->hw;
3944 struct pci_dev *pdev = adapter->pdev; 4219 struct pci_dev *pdev = adapter->pdev;
4220 struct net_device *dev = adapter->netdev;
3945 unsigned int rss; 4221 unsigned int rss;
3946#ifdef CONFIG_IXGBE_DCB 4222#ifdef CONFIG_IXGBE_DCB
3947 int j; 4223 int j;
@@ -3969,10 +4245,18 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
3969 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; 4245 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
3970 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; 4246 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
3971 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 4247 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
3972 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 4248 if (dev->features & NETIF_F_NTUPLE) {
4249 /* Flow Director perfect filter enabled */
4250 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4251 adapter->atr_sample_rate = 0;
4252 spin_lock_init(&adapter->fdir_perfect_lock);
4253 } else {
4254 /* Flow Director hash filters enabled */
4255 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
4256 adapter->atr_sample_rate = 20;
4257 }
3973 adapter->ring_feature[RING_F_FDIR].indices = 4258 adapter->ring_feature[RING_F_FDIR].indices =
3974 IXGBE_MAX_FDIR_INDICES; 4259 IXGBE_MAX_FDIR_INDICES;
3975 adapter->atr_sample_rate = 20;
3976 adapter->fdir_pballoc = 0; 4260 adapter->fdir_pballoc = 0;
3977#ifdef IXGBE_FCOE 4261#ifdef IXGBE_FCOE
3978 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; 4262 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
@@ -4041,6 +4325,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4041 /* enable rx csum by default */ 4325 /* enable rx csum by default */
4042 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; 4326 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
4043 4327
4328 /* get assigned NUMA node */
4329 adapter->node = dev_to_node(&pdev->dev);
4330
4044 set_bit(__IXGBE_DOWN, &adapter->state); 4331 set_bit(__IXGBE_DOWN, &adapter->state);
4045 4332
4046 return 0; 4333 return 0;
@@ -4060,7 +4347,9 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
4060 int size; 4347 int size;
4061 4348
4062 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; 4349 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4063 tx_ring->tx_buffer_info = vmalloc(size); 4350 tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
4351 if (!tx_ring->tx_buffer_info)
4352 tx_ring->tx_buffer_info = vmalloc(size);
4064 if (!tx_ring->tx_buffer_info) 4353 if (!tx_ring->tx_buffer_info)
4065 goto err; 4354 goto err;
4066 memset(tx_ring->tx_buffer_info, 0, size); 4355 memset(tx_ring->tx_buffer_info, 0, size);
@@ -4102,7 +4391,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
4102 int i, err = 0; 4391 int i, err = 0;
4103 4392
4104 for (i = 0; i < adapter->num_tx_queues; i++) { 4393 for (i = 0; i < adapter->num_tx_queues; i++) {
4105 err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]); 4394 err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
4106 if (!err) 4395 if (!err)
4107 continue; 4396 continue;
4108 DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i); 4397 DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
@@ -4126,7 +4415,9 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
4126 int size; 4415 int size;
4127 4416
4128 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; 4417 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
4129 rx_ring->rx_buffer_info = vmalloc(size); 4418 rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
4419 if (!rx_ring->rx_buffer_info)
4420 rx_ring->rx_buffer_info = vmalloc(size);
4130 if (!rx_ring->rx_buffer_info) { 4421 if (!rx_ring->rx_buffer_info) {
4131 DPRINTK(PROBE, ERR, 4422 DPRINTK(PROBE, ERR,
4132 "vmalloc allocation failed for the rx desc ring\n"); 4423 "vmalloc allocation failed for the rx desc ring\n");
@@ -4172,7 +4463,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
4172 int i, err = 0; 4463 int i, err = 0;
4173 4464
4174 for (i = 0; i < adapter->num_rx_queues; i++) { 4465 for (i = 0; i < adapter->num_rx_queues; i++) {
4175 err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]); 4466 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
4176 if (!err) 4467 if (!err)
4177 continue; 4468 continue;
4178 DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i); 4469 DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
@@ -4215,8 +4506,8 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
4215 int i; 4506 int i;
4216 4507
4217 for (i = 0; i < adapter->num_tx_queues; i++) 4508 for (i = 0; i < adapter->num_tx_queues; i++)
4218 if (adapter->tx_ring[i].desc) 4509 if (adapter->tx_ring[i]->desc)
4219 ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]); 4510 ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
4220} 4511}
4221 4512
4222/** 4513/**
@@ -4252,8 +4543,8 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
4252 int i; 4543 int i;
4253 4544
4254 for (i = 0; i < adapter->num_rx_queues; i++) 4545 for (i = 0; i < adapter->num_rx_queues; i++)
4255 if (adapter->rx_ring[i].desc) 4546 if (adapter->rx_ring[i]->desc)
4256 ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]); 4547 ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
4257} 4548}
4258 4549
4259/** 4550/**
@@ -4530,8 +4821,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
4530 adapter->hw_rx_no_dma_resources += 4821 adapter->hw_rx_no_dma_resources +=
4531 IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 4822 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
4532 for (i = 0; i < adapter->num_rx_queues; i++) { 4823 for (i = 0; i < adapter->num_rx_queues; i++) {
4533 rsc_count += adapter->rx_ring[i].rsc_count; 4824 rsc_count += adapter->rx_ring[i]->rsc_count;
4534 rsc_flush += adapter->rx_ring[i].rsc_flush; 4825 rsc_flush += adapter->rx_ring[i]->rsc_flush;
4535 } 4826 }
4536 adapter->rsc_total_count = rsc_count; 4827 adapter->rsc_total_count = rsc_count;
4537 adapter->rsc_total_flush = rsc_flush; 4828 adapter->rsc_total_flush = rsc_flush;
@@ -4539,11 +4830,11 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
4539 4830
4540 /* gather some stats to the adapter struct that are per queue */ 4831 /* gather some stats to the adapter struct that are per queue */
4541 for (i = 0; i < adapter->num_tx_queues; i++) 4832 for (i = 0; i < adapter->num_tx_queues; i++)
4542 restart_queue += adapter->tx_ring[i].restart_queue; 4833 restart_queue += adapter->tx_ring[i]->restart_queue;
4543 adapter->restart_queue = restart_queue; 4834 adapter->restart_queue = restart_queue;
4544 4835
4545 for (i = 0; i < adapter->num_rx_queues; i++) 4836 for (i = 0; i < adapter->num_rx_queues; i++)
4546 non_eop_descs += adapter->rx_ring[i].non_eop_descs; 4837 non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
4547 adapter->non_eop_descs = non_eop_descs; 4838 adapter->non_eop_descs = non_eop_descs;
4548 4839
4549 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 4840 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
@@ -4782,7 +5073,7 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
4782 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { 5073 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
4783 for (i = 0; i < adapter->num_tx_queues; i++) 5074 for (i = 0; i < adapter->num_tx_queues; i++)
4784 set_bit(__IXGBE_FDIR_INIT_DONE, 5075 set_bit(__IXGBE_FDIR_INIT_DONE,
4785 &(adapter->tx_ring[i].reinit_state)); 5076 &(adapter->tx_ring[i]->reinit_state));
4786 } else { 5077 } else {
4787 DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, " 5078 DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
4788 "ignored adding FDIR ATR filters \n"); 5079 "ignored adding FDIR ATR filters \n");
@@ -4791,6 +5082,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
4791 netif_tx_start_all_queues(adapter->netdev); 5082 netif_tx_start_all_queues(adapter->netdev);
4792} 5083}
4793 5084
5085static DEFINE_MUTEX(ixgbe_watchdog_lock);
5086
4794/** 5087/**
4795 * ixgbe_watchdog_task - worker thread to bring link up 5088 * ixgbe_watchdog_task - worker thread to bring link up
4796 * @work: pointer to work_struct containing our data 5089 * @work: pointer to work_struct containing our data
@@ -4802,13 +5095,16 @@ static void ixgbe_watchdog_task(struct work_struct *work)
4802 watchdog_task); 5095 watchdog_task);
4803 struct net_device *netdev = adapter->netdev; 5096 struct net_device *netdev = adapter->netdev;
4804 struct ixgbe_hw *hw = &adapter->hw; 5097 struct ixgbe_hw *hw = &adapter->hw;
4805 u32 link_speed = adapter->link_speed; 5098 u32 link_speed;
4806 bool link_up = adapter->link_up; 5099 bool link_up;
4807 int i; 5100 int i;
4808 struct ixgbe_ring *tx_ring; 5101 struct ixgbe_ring *tx_ring;
4809 int some_tx_pending = 0; 5102 int some_tx_pending = 0;
4810 5103
4811 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; 5104 mutex_lock(&ixgbe_watchdog_lock);
5105
5106 link_up = adapter->link_up;
5107 link_speed = adapter->link_speed;
4812 5108
4813 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { 5109 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4814 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 5110 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
@@ -4879,7 +5175,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
4879 5175
4880 if (!netif_carrier_ok(netdev)) { 5176 if (!netif_carrier_ok(netdev)) {
4881 for (i = 0; i < adapter->num_tx_queues; i++) { 5177 for (i = 0; i < adapter->num_tx_queues; i++) {
4882 tx_ring = &adapter->tx_ring[i]; 5178 tx_ring = adapter->tx_ring[i];
4883 if (tx_ring->next_to_use != tx_ring->next_to_clean) { 5179 if (tx_ring->next_to_use != tx_ring->next_to_clean) {
4884 some_tx_pending = 1; 5180 some_tx_pending = 1;
4885 break; 5181 break;
@@ -4897,7 +5193,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
4897 } 5193 }
4898 5194
4899 ixgbe_update_stats(adapter); 5195 ixgbe_update_stats(adapter);
4900 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; 5196 mutex_unlock(&ixgbe_watchdog_lock);
4901} 5197}
4902 5198
4903static int ixgbe_tso(struct ixgbe_adapter *adapter, 5199static int ixgbe_tso(struct ixgbe_adapter *adapter,
@@ -5343,8 +5639,14 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
5343 return txq; 5639 return txq;
5344 } 5640 }
5345#endif 5641#endif
5346 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) 5642 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
5347 return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13; 5643 if (skb->priority == TC_PRIO_CONTROL)
5644 txq = adapter->ring_feature[RING_F_DCB].indices-1;
5645 else
5646 txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK)
5647 >> 13;
5648 return txq;
5649 }
5348 5650
5349 return skb_tx_hash(dev, skb); 5651 return skb_tx_hash(dev, skb);
5350} 5652}
@@ -5371,17 +5673,12 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
5371 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 5673 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
5372 tx_flags |= IXGBE_TX_FLAGS_VLAN; 5674 tx_flags |= IXGBE_TX_FLAGS_VLAN;
5373 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 5675 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
5374 if (skb->priority != TC_PRIO_CONTROL) { 5676 tx_flags |= ((skb->queue_mapping & 0x7) << 13);
5375 tx_flags |= ((skb->queue_mapping & 0x7) << 13); 5677 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
5376 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 5678 tx_flags |= IXGBE_TX_FLAGS_VLAN;
5377 tx_flags |= IXGBE_TX_FLAGS_VLAN;
5378 } else {
5379 skb->queue_mapping =
5380 adapter->ring_feature[RING_F_DCB].indices-1;
5381 }
5382 } 5679 }
5383 5680
5384 tx_ring = &adapter->tx_ring[skb->queue_mapping]; 5681 tx_ring = adapter->tx_ring[skb->queue_mapping];
5385 5682
5386 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && 5683 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
5387 (skb->protocol == htons(ETH_P_FCOE))) { 5684 (skb->protocol == htons(ETH_P_FCOE))) {
@@ -5487,7 +5784,8 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
5487 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 5784 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
5488 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 5785 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
5489 5786
5490 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 5787 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
5788 IXGBE_RAH_AV);
5491 5789
5492 return 0; 5790 return 0;
5493} 5791}
@@ -5624,6 +5922,61 @@ static const struct net_device_ops ixgbe_netdev_ops = {
5624#endif /* IXGBE_FCOE */ 5922#endif /* IXGBE_FCOE */
5625}; 5923};
5626 5924
5925static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
5926 const struct ixgbe_info *ii)
5927{
5928#ifdef CONFIG_PCI_IOV
5929 struct ixgbe_hw *hw = &adapter->hw;
5930 int err;
5931
5932 if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
5933 return;
5934
5935 /* The 82599 supports up to 64 VFs per physical function
5936 * but this implementation limits allocation to 63 so that
5937 * basic networking resources are still available to the
5938 * physical function
5939 */
5940 adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
5941 adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
5942 err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
5943 if (err) {
5944 DPRINTK(PROBE, ERR,
5945 "Failed to enable PCI sriov: %d\n", err);
5946 goto err_novfs;
5947 }
5948 /* If call to enable VFs succeeded then allocate memory
5949 * for per VF control structures.
5950 */
5951 adapter->vfinfo =
5952 kcalloc(adapter->num_vfs,
5953 sizeof(struct vf_data_storage), GFP_KERNEL);
5954 if (adapter->vfinfo) {
5955 /* Now that we're sure SR-IOV is enabled
5956 * and memory allocated set up the mailbox parameters
5957 */
5958 ixgbe_init_mbx_params_pf(hw);
5959 memcpy(&hw->mbx.ops, ii->mbx_ops,
5960 sizeof(hw->mbx.ops));
5961
5962 /* Disable RSC when in SR-IOV mode */
5963 adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
5964 IXGBE_FLAG2_RSC_ENABLED);
5965 return;
5966 }
5967
5968 /* Oh oh */
5969 DPRINTK(PROBE, ERR,
5970 "Unable to allocate memory for VF "
5971 "Data Storage - SRIOV disabled\n");
5972 pci_disable_sriov(adapter->pdev);
5973
5974err_novfs:
5975 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
5976 adapter->num_vfs = 0;
5977#endif /* CONFIG_PCI_IOV */
5978}
5979
5627/** 5980/**
5628 * ixgbe_probe - Device Initialization Routine 5981 * ixgbe_probe - Device Initialization Routine
5629 * @pdev: PCI device information struct 5982 * @pdev: PCI device information struct
@@ -5644,6 +5997,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5644 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; 5997 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
5645 static int cards_found; 5998 static int cards_found;
5646 int i, err, pci_using_dac; 5999 int i, err, pci_using_dac;
6000 unsigned int indices = num_possible_cpus();
5647#ifdef IXGBE_FCOE 6001#ifdef IXGBE_FCOE
5648 u16 device_caps; 6002 u16 device_caps;
5649#endif 6003#endif
@@ -5682,7 +6036,18 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5682 pci_set_master(pdev); 6036 pci_set_master(pdev);
5683 pci_save_state(pdev); 6037 pci_save_state(pdev);
5684 6038
5685 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES); 6039 if (ii->mac == ixgbe_mac_82598EB)
6040 indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
6041 else
6042 indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
6043
6044 indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
6045#ifdef IXGBE_FCOE
6046 indices += min_t(unsigned int, num_possible_cpus(),
6047 IXGBE_MAX_FCOE_INDICES);
6048#endif
6049 indices = min_t(unsigned int, indices, MAX_TX_QUEUES);
6050 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
5686 if (!netdev) { 6051 if (!netdev) {
5687 err = -ENOMEM; 6052 err = -ENOMEM;
5688 goto err_alloc_etherdev; 6053 goto err_alloc_etherdev;
@@ -5802,6 +6167,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5802 goto err_sw_init; 6167 goto err_sw_init;
5803 } 6168 }
5804 6169
6170 ixgbe_probe_vf(adapter, ii);
6171
5805 netdev->features = NETIF_F_SG | 6172 netdev->features = NETIF_F_SG |
5806 NETIF_F_IP_CSUM | 6173 NETIF_F_IP_CSUM |
5807 NETIF_F_HW_VLAN_TX | 6174 NETIF_F_HW_VLAN_TX |
@@ -5822,6 +6189,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5822 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 6189 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
5823 netdev->vlan_features |= NETIF_F_SG; 6190 netdev->vlan_features |= NETIF_F_SG;
5824 6191
6192 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6193 adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
6194 IXGBE_FLAG_DCB_ENABLED);
5825 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) 6195 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
5826 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 6196 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
5827 6197
@@ -5948,6 +6318,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5948 ixgbe_setup_dca(adapter); 6318 ixgbe_setup_dca(adapter);
5949 } 6319 }
5950#endif 6320#endif
6321 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
6322 DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
6323 adapter->num_vfs);
6324 for (i = 0; i < adapter->num_vfs; i++)
6325 ixgbe_vf_configuration(pdev, (i | 0x10000000));
6326 }
6327
5951 /* add san mac addr to netdev */ 6328 /* add san mac addr to netdev */
5952 ixgbe_add_sanmac_netdev(netdev); 6329 ixgbe_add_sanmac_netdev(netdev);
5953 6330
@@ -5960,6 +6337,8 @@ err_register:
5960 ixgbe_clear_interrupt_scheme(adapter); 6337 ixgbe_clear_interrupt_scheme(adapter);
5961err_sw_init: 6338err_sw_init:
5962err_eeprom: 6339err_eeprom:
6340 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6341 ixgbe_disable_sriov(adapter);
5963 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); 6342 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
5964 del_timer_sync(&adapter->sfp_timer); 6343 del_timer_sync(&adapter->sfp_timer);
5965 cancel_work_sync(&adapter->sfp_task); 6344 cancel_work_sync(&adapter->sfp_task);
@@ -6028,6 +6407,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
6028 if (netdev->reg_state == NETREG_REGISTERED) 6407 if (netdev->reg_state == NETREG_REGISTERED)
6029 unregister_netdev(netdev); 6408 unregister_netdev(netdev);
6030 6409
6410 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6411 ixgbe_disable_sriov(adapter);
6412
6031 ixgbe_clear_interrupt_scheme(adapter); 6413 ixgbe_clear_interrupt_scheme(adapter);
6032 6414
6033 ixgbe_release_hw_control(adapter); 6415 ixgbe_release_hw_control(adapter);
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
new file mode 100644
index 000000000000..d75f9148eb1f
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -0,0 +1,479 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include <linux/pci.h>
29#include <linux/delay.h>
30#include "ixgbe_type.h"
31#include "ixgbe_common.h"
32#include "ixgbe_mbx.h"
33
34/**
35 * ixgbe_read_mbx - Reads a message from the mailbox
36 * @hw: pointer to the HW structure
37 * @msg: The message buffer
38 * @size: Length of buffer
39 * @mbx_id: id of mailbox to read
40 *
41 * returns SUCCESS if it successfuly read message from buffer
42 **/
43s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
44{
45 struct ixgbe_mbx_info *mbx = &hw->mbx;
46 s32 ret_val = IXGBE_ERR_MBX;
47
48 /* limit read to size of mailbox */
49 if (size > mbx->size)
50 size = mbx->size;
51
52 if (mbx->ops.read)
53 ret_val = mbx->ops.read(hw, msg, size, mbx_id);
54
55 return ret_val;
56}
57
58/**
59 * ixgbe_write_mbx - Write a message to the mailbox
60 * @hw: pointer to the HW structure
61 * @msg: The message buffer
62 * @size: Length of buffer
63 * @mbx_id: id of mailbox to write
64 *
65 * returns SUCCESS if it successfully copied message into the buffer
66 **/
67s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
68{
69 struct ixgbe_mbx_info *mbx = &hw->mbx;
70 s32 ret_val = 0;
71
72 if (size > mbx->size)
73 ret_val = IXGBE_ERR_MBX;
74
75 else if (mbx->ops.write)
76 ret_val = mbx->ops.write(hw, msg, size, mbx_id);
77
78 return ret_val;
79}
80
81/**
82 * ixgbe_check_for_msg - checks to see if someone sent us mail
83 * @hw: pointer to the HW structure
84 * @mbx_id: id of mailbox to check
85 *
86 * returns SUCCESS if the Status bit was found or else ERR_MBX
87 **/
88s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
89{
90 struct ixgbe_mbx_info *mbx = &hw->mbx;
91 s32 ret_val = IXGBE_ERR_MBX;
92
93 if (mbx->ops.check_for_msg)
94 ret_val = mbx->ops.check_for_msg(hw, mbx_id);
95
96 return ret_val;
97}
98
99/**
100 * ixgbe_check_for_ack - checks to see if someone sent us ACK
101 * @hw: pointer to the HW structure
102 * @mbx_id: id of mailbox to check
103 *
104 * returns SUCCESS if the Status bit was found or else ERR_MBX
105 **/
106s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
107{
108 struct ixgbe_mbx_info *mbx = &hw->mbx;
109 s32 ret_val = IXGBE_ERR_MBX;
110
111 if (mbx->ops.check_for_ack)
112 ret_val = mbx->ops.check_for_ack(hw, mbx_id);
113
114 return ret_val;
115}
116
117/**
118 * ixgbe_check_for_rst - checks to see if other side has reset
119 * @hw: pointer to the HW structure
120 * @mbx_id: id of mailbox to check
121 *
122 * returns SUCCESS if the Status bit was found or else ERR_MBX
123 **/
124s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
125{
126 struct ixgbe_mbx_info *mbx = &hw->mbx;
127 s32 ret_val = IXGBE_ERR_MBX;
128
129 if (mbx->ops.check_for_rst)
130 ret_val = mbx->ops.check_for_rst(hw, mbx_id);
131
132 return ret_val;
133}
134
135/**
136 * ixgbe_poll_for_msg - Wait for message notification
137 * @hw: pointer to the HW structure
138 * @mbx_id: id of mailbox to write
139 *
140 * returns SUCCESS if it successfully received a message notification
141 **/
142static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
143{
144 struct ixgbe_mbx_info *mbx = &hw->mbx;
145 int countdown = mbx->timeout;
146
147 if (!countdown || !mbx->ops.check_for_msg)
148 goto out;
149
150 while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
151 countdown--;
152 if (!countdown)
153 break;
154 udelay(mbx->usec_delay);
155 }
156
157 /* if we failed, all future posted messages fail until reset */
158 if (!countdown)
159 mbx->timeout = 0;
160out:
161 return countdown ? 0 : IXGBE_ERR_MBX;
162}
163
164/**
165 * ixgbe_poll_for_ack - Wait for message acknowledgement
166 * @hw: pointer to the HW structure
167 * @mbx_id: id of mailbox to write
168 *
169 * returns SUCCESS if it successfully received a message acknowledgement
170 **/
171static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
172{
173 struct ixgbe_mbx_info *mbx = &hw->mbx;
174 int countdown = mbx->timeout;
175
176 if (!countdown || !mbx->ops.check_for_ack)
177 goto out;
178
179 while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
180 countdown--;
181 if (!countdown)
182 break;
183 udelay(mbx->usec_delay);
184 }
185
186 /* if we failed, all future posted messages fail until reset */
187 if (!countdown)
188 mbx->timeout = 0;
189out:
190 return countdown ? 0 : IXGBE_ERR_MBX;
191}
192
193/**
194 * ixgbe_read_posted_mbx - Wait for message notification and receive message
195 * @hw: pointer to the HW structure
196 * @msg: The message buffer
197 * @size: Length of buffer
198 * @mbx_id: id of mailbox to write
199 *
200 * returns SUCCESS if it successfully received a message notification and
201 * copied it into the receive buffer.
202 **/
203s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
204{
205 struct ixgbe_mbx_info *mbx = &hw->mbx;
206 s32 ret_val = IXGBE_ERR_MBX;
207
208 if (!mbx->ops.read)
209 goto out;
210
211 ret_val = ixgbe_poll_for_msg(hw, mbx_id);
212
213 /* if ack received read message, otherwise we timed out */
214 if (!ret_val)
215 ret_val = mbx->ops.read(hw, msg, size, mbx_id);
216out:
217 return ret_val;
218}
219
220/**
221 * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
222 * @hw: pointer to the HW structure
223 * @msg: The message buffer
224 * @size: Length of buffer
225 * @mbx_id: id of mailbox to write
226 *
227 * returns SUCCESS if it successfully copied message into the buffer and
228 * received an ack to that message within delay * timeout period
229 **/
230s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
231 u16 mbx_id)
232{
233 struct ixgbe_mbx_info *mbx = &hw->mbx;
234 s32 ret_val = IXGBE_ERR_MBX;
235
236 /* exit if either we can't write or there isn't a defined timeout */
237 if (!mbx->ops.write || !mbx->timeout)
238 goto out;
239
240 /* send msg */
241 ret_val = mbx->ops.write(hw, msg, size, mbx_id);
242
243 /* if msg sent wait until we receive an ack */
244 if (!ret_val)
245 ret_val = ixgbe_poll_for_ack(hw, mbx_id);
246out:
247 return ret_val;
248}
249
250/**
251 * ixgbe_init_mbx_ops_generic - Initialize MB function pointers
252 * @hw: pointer to the HW structure
253 *
254 * Setup the mailbox read and write message function pointers
255 **/
256void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
257{
258 struct ixgbe_mbx_info *mbx = &hw->mbx;
259
260 mbx->ops.read_posted = ixgbe_read_posted_mbx;
261 mbx->ops.write_posted = ixgbe_write_posted_mbx;
262}
263
264static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
265{
266 u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
267 s32 ret_val = IXGBE_ERR_MBX;
268
269 if (mbvficr & mask) {
270 ret_val = 0;
271 IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
272 }
273
274 return ret_val;
275}
276
277/**
278 * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
279 * @hw: pointer to the HW structure
280 * @vf_number: the VF index
281 *
282 * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
283 **/
284static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
285{
286 s32 ret_val = IXGBE_ERR_MBX;
287 s32 index = IXGBE_MBVFICR_INDEX(vf_number);
288 u32 vf_bit = vf_number % 16;
289
290 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
291 index)) {
292 ret_val = 0;
293 hw->mbx.stats.reqs++;
294 }
295
296 return ret_val;
297}
298
299/**
300 * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
301 * @hw: pointer to the HW structure
302 * @vf_number: the VF index
303 *
304 * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
305 **/
306static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
307{
308 s32 ret_val = IXGBE_ERR_MBX;
309 s32 index = IXGBE_MBVFICR_INDEX(vf_number);
310 u32 vf_bit = vf_number % 16;
311
312 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
313 index)) {
314 ret_val = 0;
315 hw->mbx.stats.acks++;
316 }
317
318 return ret_val;
319}
320
321/**
322 * ixgbe_check_for_rst_pf - checks to see if the VF has reset
323 * @hw: pointer to the HW structure
324 * @vf_number: the VF index
325 *
326 * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
327 **/
328static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
329{
330 u32 reg_offset = (vf_number < 32) ? 0 : 1;
331 u32 vf_shift = vf_number % 32;
332 u32 vflre = 0;
333 s32 ret_val = IXGBE_ERR_MBX;
334
335 if (hw->mac.type == ixgbe_mac_82599EB)
336 vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
337
338 if (vflre & (1 << vf_shift)) {
339 ret_val = 0;
340 IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
341 hw->mbx.stats.rsts++;
342 }
343
344 return ret_val;
345}
346
347/**
348 * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
349 * @hw: pointer to the HW structure
350 * @vf_number: the VF index
351 *
352 * return SUCCESS if we obtained the mailbox lock
353 **/
354static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
355{
356 s32 ret_val = IXGBE_ERR_MBX;
357 u32 p2v_mailbox;
358
359 /* Take ownership of the buffer */
360 IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
361
362 /* reserve mailbox for vf use */
363 p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
364 if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
365 ret_val = 0;
366
367 return ret_val;
368}
369
/**
 * ixgbe_write_mbx_pf - Places a message in the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer, in u32 words
 * @vf_number: the VF index
 *
 * returns SUCCESS if it successfully copied message into the buffer
 *
 * NOTE(review): @size is not bounded here; callers are expected to go
 * through ixgbe_write_mbx(), which rejects sizes above mbx->size.
 **/
static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
                              u16 vf_number)
{
	s32 ret_val;
	u16 i;

	/* lock the mailbox to prevent pf/vf race condition */
	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
	if (ret_val)
		goto out_no_write;

	/* flush msg and acks as we are overwriting the message buffer */
	ixgbe_check_for_msg_pf(hw, vf_number);
	ixgbe_check_for_ack_pf(hw, vf_number);

	/* copy the caller specified message to the mailbox memory buffer */
	for (i = 0; i < size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);

	/* Interrupt VF to tell it a message has been sent and release buffer*/
	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);

	/* update stats */
	hw->mbx.stats.msgs_tx++;

out_no_write:
	return ret_val;

}
408
/**
 * ixgbe_read_mbx_pf - Read a message from the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer, in u32 words
 * @vf_number: the VF index
 *
 * This function copies a message from the mailbox buffer to the caller's
 * memory buffer. The presumption is that the caller knows that there was
 * a message due to a VF request so no polling for message is needed.
 **/
static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
                             u16 vf_number)
{
	s32 ret_val;
	u16 i;

	/* lock the mailbox to prevent pf/vf race condition */
	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
	if (ret_val)
		goto out_no_read;

	/* copy the message to the mailbox memory buffer */
	for (i = 0; i < size; i++)
		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);

	/* Acknowledge the message and release buffer */
	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);

	/* update stats */
	hw->mbx.stats.msgs_rx++;

out_no_read:
	return ret_val;
}
444
/**
 * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
 * @hw: pointer to the HW structure
 *
 * Initializes the hw->mbx struct to correct values for pf mailbox
 */
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;

	/* only 82599 hardware has the PF/VF mailbox */
	if (hw->mac.type != ixgbe_mac_82599EB)
		return;

	/* timeout 0 makes posted reads/writes fail immediately on the PF
	 * side (see ixgbe_poll_for_msg/ack) -- the PF never blocks on a VF */
	mbx->timeout = 0;
	mbx->usec_delay = 0;

	mbx->size = IXGBE_VFMAILBOX_SIZE;

	/* reset all mailbox statistics counters */
	mbx->stats.msgs_tx = 0;
	mbx->stats.msgs_rx = 0;
	mbx->stats.reqs = 0;
	mbx->stats.acks = 0;
	mbx->stats.rsts = 0;
}
469
/* Mailbox operations used by the PF to talk to VFs on 82599 hardware;
 * copied into hw->mbx.ops when SR-IOV is enabled (see ixgbe_probe_vf). */
struct ixgbe_mbx_operations mbx_ops_82599 = {
	.read = ixgbe_read_mbx_pf,
	.write = ixgbe_write_mbx_pf,
	.read_posted = ixgbe_read_posted_mbx,
	.write_posted = ixgbe_write_posted_mbx,
	.check_for_msg = ixgbe_check_for_msg_pf,
	.check_for_ack = ixgbe_check_for_ack_pf,
	.check_for_rst = ixgbe_check_for_rst_pf,
};
479
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
new file mode 100644
index 000000000000..be7ab3309ab7
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -0,0 +1,96 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _IXGBE_MBX_H_
29#define _IXGBE_MBX_H_
30
31#include "ixgbe_type.h"
32
33#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
34#define IXGBE_ERR_MBX -100
35
36#define IXGBE_VFMAILBOX 0x002FC
37#define IXGBE_VFMBMEM 0x00200
38
39#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
40#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
41
42#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
43#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
44#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
45#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
46#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
47
48#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
49#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
50#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
51#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
52
53
54/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
55 * PF. The reverse is true if it is IXGBE_PF_*.
56 * Message ACK's are the value or'd with 0xF0000000
57 */
58#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
59 * this are the ACK */
60#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
61 * this are the NACK */
62#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
63 clear to send requests */
64#define IXGBE_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
66#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
67
68#define IXGBE_VF_RESET 0x01 /* VF requests reset */
69#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
70#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
71#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
72#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
73
74/* length of permanent address message returned from PF */
75#define IXGBE_VF_PERMADDR_MSG_LEN 4
76/* word in permanent address message with the current multicast type */
77#define IXGBE_VF_MC_TYPE_WORD 3
78
79#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
80
81#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
82#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
83
84s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
85s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
86s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
87s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
88s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
89s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
90s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
91void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
92void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
93
94extern struct ixgbe_mbx_operations mbx_ops_82599;
95
96#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
new file mode 100644
index 000000000000..d4cd20f30199
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -0,0 +1,362 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28
29#include <linux/types.h>
30#include <linux/module.h>
31#include <linux/pci.h>
32#include <linux/netdevice.h>
33#include <linux/vmalloc.h>
34#include <linux/string.h>
35#include <linux/in.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/ipv6.h>
39#ifdef NETIF_F_HW_VLAN_TX
40#include <linux/if_vlan.h>
41#endif
42
43#include "ixgbe.h"
44
45#include "ixgbe_sriov.h"
46
47int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
48 int entries, u16 *hash_list, u32 vf)
49{
50 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
51 int i;
52
53 /* only so many hash values supported */
54 entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
55
56 /*
57 * salt away the number of multi cast addresses assigned
58 * to this VF for later use to restore when the PF multi cast
59 * list changes
60 */
61 vfinfo->num_vf_mc_hashes = entries;
62
63 /*
64 * VFs are limited to using the MTA hash table for their multicast
65 * addresses
66 */
67 for (i = 0; i < entries; i++) {
68 vfinfo->vf_mc_hashes[i] = hash_list[i];;
69 }
70
71 /* Flush and reset the mta with the new values */
72 ixgbe_set_rx_mode(adapter->netdev);
73
74 return 0;
75}
76
77void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
78{
79 struct ixgbe_hw *hw = &adapter->hw;
80 struct vf_data_storage *vfinfo;
81 int i, j;
82 u32 vector_bit;
83 u32 vector_reg;
84 u32 mta_reg;
85
86 for (i = 0; i < adapter->num_vfs; i++) {
87 vfinfo = &adapter->vfinfo[i];
88 for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
89 hw->addr_ctrl.mta_in_use++;
90 vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
91 vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
92 mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
93 mta_reg |= (1 << vector_bit);
94 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
95 }
96 }
97}
98
99int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf)
100{
101 u32 ctrl;
102
103 /* Check if global VLAN already set, if not set it */
104 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
105 if (!(ctrl & IXGBE_VLNCTRL_VFE)) {
106 /* enable VLAN tag insert/strip */
107 ctrl |= IXGBE_VLNCTRL_VFE;
108 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
109 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
110 }
111
112 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
113}
114
115
116void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf)
117{
118 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
119 vmolr |= (IXGBE_VMOLR_AUPE |
120 IXGBE_VMOLR_ROMPE |
121 IXGBE_VMOLR_ROPE |
122 IXGBE_VMOLR_BAM);
123 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
124}
125
/*
 * ixgbe_vf_reset_event - return a VF's filter/offload state to defaults
 * @adapter: board private structure
 * @vf: VF index
 *
 * Invoked when a VF reset is detected (VFLR or mailbox reset request).
 */
inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* reset offloads to defaults */
	ixgbe_set_vmolr(hw, vf);


	/* reset multicast table array for vf */
	adapter->vfinfo[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	ixgbe_set_rx_mode(adapter->netdev);

	/* drop the VF's MAC filter if one is installed; rar > 0 is used as
	 * the "installed" marker (see ixgbe_set_vf_mac), -1 means none */
	if (adapter->vfinfo[vf].rar > 0) {
		adapter->hw.mac.ops.clear_rar(&adapter->hw,
		                              adapter->vfinfo[vf].rar);
		adapter->vfinfo[vf].rar = -1;
	}
}
146
147int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
148 int vf, unsigned char *mac_addr)
149{
150 struct ixgbe_hw *hw = &adapter->hw;
151
152 adapter->vfinfo[vf].rar = hw->mac.ops.set_rar(hw, vf + 1, mac_addr,
153 vf, IXGBE_RAH_AV);
154 if (adapter->vfinfo[vf].rar < 0) {
155 DPRINTK(DRV, ERR, "Could not set MAC Filter for VF %d\n", vf);
156 return -1;
157 }
158
159 memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
160
161 return 0;
162}
163
164int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
165{
166 unsigned char vf_mac_addr[6];
167 struct net_device *netdev = pci_get_drvdata(pdev);
168 struct ixgbe_adapter *adapter = netdev_priv(netdev);
169 unsigned int vfn = (event_mask & 0x3f);
170
171 bool enable = ((event_mask & 0x10000000U) != 0);
172
173 if (enable) {
174 random_ether_addr(vf_mac_addr);
175 DPRINTK(PROBE, INFO, "IOV: VF %d is enabled "
176 "mac %02X:%02X:%02X:%02X:%02X:%02X\n",
177 vfn,
178 vf_mac_addr[0], vf_mac_addr[1], vf_mac_addr[2],
179 vf_mac_addr[3], vf_mac_addr[4], vf_mac_addr[5]);
180 /*
181 * Store away the VF "permananet" MAC address, it will ask
182 * for it later.
183 */
184 memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
185 }
186
187 return 0;
188}
189
190inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
191{
192 struct ixgbe_hw *hw = &adapter->hw;
193 u32 reg;
194 u32 reg_offset, vf_shift;
195
196 vf_shift = vf % 32;
197 reg_offset = vf / 32;
198
199 /* enable transmit and receive for vf */
200 reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
201 reg |= (reg | (1 << vf_shift));
202 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
203
204 reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
205 reg |= (reg | (1 << vf_shift));
206 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
207
208 ixgbe_vf_reset_event(adapter, vf);
209}
210
211static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
212{
213 u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
214 u32 msgbuf[mbx_size];
215 struct ixgbe_hw *hw = &adapter->hw;
216 s32 retval;
217 int entries;
218 u16 *hash_list;
219 int add, vid;
220
221 retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
222
223 if (retval)
224 printk(KERN_ERR "Error receiving message from VF\n");
225
226 /* this is a message we already processed, do nothing */
227 if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
228 return retval;
229
230 /*
231 * until the vf completes a virtual function reset it should not be
232 * allowed to start any configuration.
233 */
234
235 if (msgbuf[0] == IXGBE_VF_RESET) {
236 unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
237 u8 *addr = (u8 *)(&msgbuf[1]);
238 DPRINTK(PROBE, INFO, "VF Reset msg received from vf %d\n", vf);
239 adapter->vfinfo[vf].clear_to_send = false;
240 ixgbe_vf_reset_msg(adapter, vf);
241 adapter->vfinfo[vf].clear_to_send = true;
242
243 /* reply to reset with ack and vf mac address */
244 msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
245 memcpy(addr, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
246 /*
247 * Piggyback the multicast filter type so VF can compute the
248 * correct vectors
249 */
250 msgbuf[3] = hw->mac.mc_filter_type;
251 ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
252
253 return retval;
254 }
255
256 if (!adapter->vfinfo[vf].clear_to_send) {
257 msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
258 ixgbe_write_mbx(hw, msgbuf, 1, vf);
259 return retval;
260 }
261
262 switch ((msgbuf[0] & 0xFFFF)) {
263 case IXGBE_VF_SET_MAC_ADDR:
264 {
265 u8 *new_mac = ((u8 *)(&msgbuf[1]));
266 if (is_valid_ether_addr(new_mac))
267 ixgbe_set_vf_mac(adapter, vf, new_mac);
268 else
269 retval = -1;
270 }
271 break;
272 case IXGBE_VF_SET_MULTICAST:
273 entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
274 >> IXGBE_VT_MSGINFO_SHIFT;
275 hash_list = (u16 *)&msgbuf[1];
276 retval = ixgbe_set_vf_multicasts(adapter, entries,
277 hash_list, vf);
278 break;
279 case IXGBE_VF_SET_LPE:
280 WARN_ON((msgbuf[0] & 0xFFFF) == IXGBE_VF_SET_LPE);
281 break;
282 case IXGBE_VF_SET_VLAN:
283 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
284 >> IXGBE_VT_MSGINFO_SHIFT;
285 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
286 retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
287 break;
288 default:
289 DPRINTK(DRV, ERR, "Unhandled Msg %8.8x\n", msgbuf[0]);
290 retval = IXGBE_ERR_MBX;
291 break;
292 }
293
294 /* notify the VF of the results of what it sent us */
295 if (retval)
296 msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
297 else
298 msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
299
300 msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
301
302 ixgbe_write_mbx(hw, msgbuf, 1, vf);
303
304 return retval;
305}
306
307static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
308{
309 struct ixgbe_hw *hw = &adapter->hw;
310 u32 msg = IXGBE_VT_MSGTYPE_NACK;
311
312 /* if device isn't clear to send it shouldn't be reading either */
313 if (!adapter->vfinfo[vf].clear_to_send)
314 ixgbe_write_mbx(hw, &msg, 1, vf);
315}
316
317void ixgbe_msg_task(struct ixgbe_adapter *adapter)
318{
319 struct ixgbe_hw *hw = &adapter->hw;
320 u32 vf;
321
322 for (vf = 0; vf < adapter->num_vfs; vf++) {
323 /* process any reset requests */
324 if (!ixgbe_check_for_rst(hw, vf))
325 ixgbe_vf_reset_event(adapter, vf);
326
327 /* process any messages pending */
328 if (!ixgbe_check_for_msg(hw, vf))
329 ixgbe_rcv_msg_from_vf(adapter, vf);
330
331 /* process any acks */
332 if (!ixgbe_check_for_ack(hw, vf))
333 ixgbe_rcv_ack_from_vf(adapter, vf);
334 }
335}
336
/*
 * ixgbe_disable_tx_rx - disable transmit and receive for every VF
 * @adapter: board private structure
 *
 * Clears both 32-VF banks of the VF transmit-enable (VFTE) and
 * receive-enable (VFRE) registers.
 */
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* disable transmit and receive for all vfs */
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
}
348
349void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
350{
351 struct ixgbe_hw *hw = &adapter->hw;
352 u32 ping;
353 int i;
354
355 for (i = 0 ; i < adapter->num_vfs; i++) {
356 ping = IXGBE_PF_CONTROL_MSG;
357 if (adapter->vfinfo[i].clear_to_send)
358 ping |= IXGBE_VT_MSGTYPE_CTS;
359 ixgbe_write_mbx(hw, &ping, 1, i);
360 }
361}
362
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
new file mode 100644
index 000000000000..51d1106c45a1
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -0,0 +1,47 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _IXGBE_SRIOV_H_
29#define _IXGBE_SRIOV_H_
30
31int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
32 int entries, u16 *hash_list, u32 vf);
33void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
34int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
35void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf);
36void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
37void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
38void ixgbe_msg_task(struct ixgbe_adapter *adapter);
39int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
40 int vf, unsigned char *mac_addr);
41int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
42void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
43void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
44void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
45
46#endif /* _IXGBE_SRIOV_H_ */
47
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 9eafddfa1b97..2be907466593 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -30,7 +30,7 @@
30 30
31#include <linux/types.h> 31#include <linux/types.h>
32#include <linux/mdio.h> 32#include <linux/mdio.h>
33#include <linux/list.h> 33#include <linux/netdevice.h>
34 34
35/* Vendor ID */ 35/* Vendor ID */
36#define IXGBE_INTEL_VENDOR_ID 0x8086 36#define IXGBE_INTEL_VENDOR_ID 0x8086
@@ -277,6 +277,7 @@
277#define IXGBE_DTXCTL 0x07E00 277#define IXGBE_DTXCTL 0x07E00
278 278
279#define IXGBE_DMATXCTL 0x04A80 279#define IXGBE_DMATXCTL 0x04A80
280#define IXGBE_PFDTXGSWC 0x08220
280#define IXGBE_DTXMXSZRQ 0x08100 281#define IXGBE_DTXMXSZRQ 0x08100
281#define IXGBE_DTXTCPFLGL 0x04A88 282#define IXGBE_DTXTCPFLGL 0x04A88
282#define IXGBE_DTXTCPFLGH 0x04A8C 283#define IXGBE_DTXTCPFLGH 0x04A8C
@@ -287,6 +288,8 @@
287#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ 288#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
288#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ 289#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
289#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ 290#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
291
292#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
290#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ 293#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
291/* Tx DCA Control register : 128 of these (0-127) */ 294/* Tx DCA Control register : 128 of these (0-127) */
292#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40)) 295#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
@@ -497,6 +500,7 @@
497/* DCB registers */ 500/* DCB registers */
498#define IXGBE_RTRPCS 0x02430 501#define IXGBE_RTRPCS 0x02430
499#define IXGBE_RTTDCS 0x04900 502#define IXGBE_RTTDCS 0x04900
503#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
500#define IXGBE_RTTPCS 0x0CD00 504#define IXGBE_RTTPCS 0x0CD00
501#define IXGBE_RTRUP2TC 0x03020 505#define IXGBE_RTRUP2TC 0x03020
502#define IXGBE_RTTUP2TC 0x0C800 506#define IXGBE_RTTUP2TC 0x0C800
@@ -730,6 +734,13 @@
730#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 734#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
731#define IXGBE_GCR_CAP_VER2 0x00040000 735#define IXGBE_GCR_CAP_VER2 0x00040000
732 736
737#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
738#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
739#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
740#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
741#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
742 IXGBE_GCR_EXT_VT_MODE_64)
743
733/* Time Sync Registers */ 744/* Time Sync Registers */
734#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ 745#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
735#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ 746#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
@@ -1065,6 +1076,8 @@
1065/* VFRE bitmask */ 1076/* VFRE bitmask */
1066#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF 1077#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF
1067 1078
1079#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
1080
1068/* RDHMPN and TDHMPN bitmasks */ 1081/* RDHMPN and TDHMPN bitmasks */
1069#define IXGBE_RDHMPN_RDICADDR 0x007FF800 1082#define IXGBE_RDHMPN_RDICADDR 0x007FF800
1070#define IXGBE_RDHMPN_RDICRDREQ 0x00800000 1083#define IXGBE_RDHMPN_RDICRDREQ 0x00800000
@@ -1295,6 +1308,7 @@
1295/* VLAN pool filtering masks */ 1308/* VLAN pool filtering masks */
1296#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */ 1309#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */
1297#define IXGBE_VLVF_ENTRIES 64 1310#define IXGBE_VLVF_ENTRIES 64
1311#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
1298 1312
1299#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ 1313#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
1300 1314
@@ -1843,6 +1857,12 @@
1843#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ 1857#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
1844#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 1858#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
1845 1859
1860/* SR-IOV specific macros */
1861#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
1862#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4))
1863#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
1864#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
1865
1846/* Little Endian defines */ 1866/* Little Endian defines */
1847#ifndef __le32 1867#ifndef __le32
1848#define __le32 u32 1868#define __le32 u32
@@ -2109,6 +2129,15 @@ struct ixgbe_atr_input {
2109 u8 byte_stream[42]; 2129 u8 byte_stream[42];
2110}; 2130};
2111 2131
2132struct ixgbe_atr_input_masks {
2133 u32 src_ip_mask;
2134 u32 dst_ip_mask;
2135 u16 src_port_mask;
2136 u16 dst_port_mask;
2137 u16 vlan_id_mask;
2138 u16 data_mask;
2139};
2140
2112enum ixgbe_eeprom_type { 2141enum ixgbe_eeprom_type {
2113 ixgbe_eeprom_uninitialized = 0, 2142 ixgbe_eeprom_uninitialized = 0,
2114 ixgbe_eeprom_spi, 2143 ixgbe_eeprom_spi,
@@ -2385,7 +2414,7 @@ struct ixgbe_mac_operations {
2385 s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); 2414 s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
2386 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); 2415 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
2387 s32 (*init_rx_addrs)(struct ixgbe_hw *); 2416 s32 (*init_rx_addrs)(struct ixgbe_hw *);
2388 s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct list_head *); 2417 s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
2389 s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, 2418 s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
2390 ixgbe_mc_addr_itr); 2419 ixgbe_mc_addr_itr);
2391 s32 (*enable_mc)(struct ixgbe_hw *); 2420 s32 (*enable_mc)(struct ixgbe_hw *);
@@ -2463,6 +2492,37 @@ struct ixgbe_phy_info {
2463 bool multispeed_fiber; 2492 bool multispeed_fiber;
2464}; 2493};
2465 2494
2495#include "ixgbe_mbx.h"
2496
2497struct ixgbe_mbx_operations {
2498 s32 (*init_params)(struct ixgbe_hw *hw);
2499 s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
2500 s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
2501 s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
2502 s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
2503 s32 (*check_for_msg)(struct ixgbe_hw *, u16);
2504 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
2505 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
2506};
2507
2508struct ixgbe_mbx_stats {
2509 u32 msgs_tx;
2510 u32 msgs_rx;
2511
2512 u32 acks;
2513 u32 reqs;
2514 u32 rsts;
2515};
2516
2517struct ixgbe_mbx_info {
2518 struct ixgbe_mbx_operations ops;
2519 struct ixgbe_mbx_stats stats;
2520 u32 timeout;
2521 u32 usec_delay;
2522 u32 v2p_mailbox;
2523 u16 size;
2524};
2525
2466struct ixgbe_hw { 2526struct ixgbe_hw {
2467 u8 __iomem *hw_addr; 2527 u8 __iomem *hw_addr;
2468 void *back; 2528 void *back;
@@ -2472,6 +2532,7 @@ struct ixgbe_hw {
2472 struct ixgbe_phy_info phy; 2532 struct ixgbe_phy_info phy;
2473 struct ixgbe_eeprom_info eeprom; 2533 struct ixgbe_eeprom_info eeprom;
2474 struct ixgbe_bus_info bus; 2534 struct ixgbe_bus_info bus;
2535 struct ixgbe_mbx_info mbx;
2475 u16 device_id; 2536 u16 device_id;
2476 u16 vendor_id; 2537 u16 vendor_id;
2477 u16 subsystem_device_id; 2538 u16 subsystem_device_id;
@@ -2486,6 +2547,7 @@ struct ixgbe_info {
2486 struct ixgbe_mac_operations *mac_ops; 2547 struct ixgbe_mac_operations *mac_ops;
2487 struct ixgbe_eeprom_operations *eeprom_ops; 2548 struct ixgbe_eeprom_operations *eeprom_ops;
2488 struct ixgbe_phy_operations *phy_ops; 2549 struct ixgbe_phy_operations *phy_ops;
2550 struct ixgbe_mbx_operations *mbx_ops;
2489}; 2551};
2490 2552
2491 2553
diff --git a/drivers/net/ixgbevf/Makefile b/drivers/net/ixgbevf/Makefile
new file mode 100644
index 000000000000..dd4e0d27e8cc
--- /dev/null
+++ b/drivers/net/ixgbevf/Makefile
@@ -0,0 +1,38 @@
1################################################################################
2#
3# Intel 82599 Virtual Function driver
4# Copyright(c) 1999 - 2009 Intel Corporation.
5#
6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License,
8# version 2, as published by the Free Software Foundation.
9#
10# This program is distributed in the hope it will be useful, but WITHOUT
11# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13# more details.
14#
15# You should have received a copy of the GNU General Public License along with
16# this program; if not, write to the Free Software Foundation, Inc.,
17# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18#
19# The full GNU General Public License is included in this distribution in
20# the file called "COPYING".
21#
22# Contact Information:
23# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25#
26################################################################################
27
28#
29# Makefile for the Intel(R) 82599 VF ethernet driver
30#
31
32obj-$(CONFIG_IXGBEVF) += ixgbevf.o
33
34ixgbevf-objs := vf.o \
35 mbx.o \
36 ethtool.o \
37 ixgbevf_main.o
38
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
new file mode 100644
index 000000000000..c44fdb05447a
--- /dev/null
+++ b/drivers/net/ixgbevf/defines.h
@@ -0,0 +1,292 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _IXGBEVF_DEFINES_H_
29#define _IXGBEVF_DEFINES_H_
30
31/* Device IDs */
32#define IXGBE_DEV_ID_82599_VF 0x10ED
33
34#define IXGBE_VF_IRQ_CLEAR_MASK 7
35#define IXGBE_VF_MAX_TX_QUEUES 1
36#define IXGBE_VF_MAX_RX_QUEUES 1
37#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
38
39/* Link speed */
40typedef u32 ixgbe_link_speed;
41#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
42#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
43
44#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
45#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
46#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
47#define IXGBE_LINKS_UP 0x40000000
48#define IXGBE_LINKS_SPEED 0x20000000
49
50/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
51#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
52#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
53#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
54
55/* Interrupt Vector Allocation Registers */
56#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
57
58#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
59
60/* Receive Config masks */
61#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
62#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
63#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
64#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
65
66/* DCA Control */
67#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
68
69/* PSRTYPE bit definitions */
70#define IXGBE_PSRTYPE_TCPHDR 0x00000010
71#define IXGBE_PSRTYPE_UDPHDR 0x00000020
72#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
73#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
74#define IXGBE_PSRTYPE_L2HDR 0x00001000
75
76/* SRRCTL bit definitions */
77#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
78#define IXGBE_SRRCTL_RDMTS_SHIFT 22
79#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000
80#define IXGBE_SRRCTL_DROP_EN 0x10000000
81#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
82#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
83#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
84#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
85#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
86#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
87#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
88#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
89
90/* Receive Descriptor bit definitions */
91#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
92#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
93#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */
94#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
95#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */
96#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004
97#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
98#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
99#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
100#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
101#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
102#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
103#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
104#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
105#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
106#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
107#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
108#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
109#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
110#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
111#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
112#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
113#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
114#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
115#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
116#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */
117#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
118#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */
119#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
120#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
121#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
122#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
123#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
124#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
125#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
126#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
127#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
128#define IXGBE_RXD_PRI_SHIFT 13
129#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
130#define IXGBE_RXD_CFI_SHIFT 12
131
132#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */
133#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */
134#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */
135#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */
136#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */
137#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */
138#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */
139#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
140#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
141#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
142#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
143
144#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
145#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
146#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
147#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
148#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
149#define IXGBE_RXDADV_RSCCNT_SHIFT 17
150#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
151#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
152#define IXGBE_RXDADV_SPH 0x8000
153
154#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
155 IXGBE_RXD_ERR_CE | \
156 IXGBE_RXD_ERR_LE | \
157 IXGBE_RXD_ERR_PE | \
158 IXGBE_RXD_ERR_OSE | \
159 IXGBE_RXD_ERR_USE)
160
161#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
162 IXGBE_RXDADV_ERR_CE | \
163 IXGBE_RXDADV_ERR_LE | \
164 IXGBE_RXDADV_ERR_PE | \
165 IXGBE_RXDADV_ERR_OSE | \
166 IXGBE_RXDADV_ERR_USE)
167
168#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
169#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
170#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */
171#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
172#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
173#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
174#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
175#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
176#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
177
178/* Transmit Descriptor - Advanced */
179union ixgbe_adv_tx_desc {
180 struct {
181 __le64 buffer_addr; /* Address of descriptor's data buf */
182 __le32 cmd_type_len;
183 __le32 olinfo_status;
184 } read;
185 struct {
186 __le64 rsvd; /* Reserved */
187 __le32 nxtseq_seed;
188 __le32 status;
189 } wb;
190};
191
192/* Receive Descriptor - Advanced */
193union ixgbe_adv_rx_desc {
194 struct {
195 __le64 pkt_addr; /* Packet buffer address */
196 __le64 hdr_addr; /* Header buffer address */
197 } read;
198 struct {
199 struct {
200 union {
201 __le32 data;
202 struct {
203 __le16 pkt_info; /* RSS, Pkt type */
204 __le16 hdr_info; /* Splithdr, hdrlen */
205 } hs_rss;
206 } lo_dword;
207 union {
208 __le32 rss; /* RSS Hash */
209 struct {
210 __le16 ip_id; /* IP id */
211 __le16 csum; /* Packet Checksum */
212 } csum_ip;
213 } hi_dword;
214 } lower;
215 struct {
216 __le32 status_error; /* ext status/error */
217 __le16 length; /* Packet length */
218 __le16 vlan; /* VLAN tag */
219 } upper;
220 } wb; /* writeback */
221};
222
223/* Context descriptors */
224struct ixgbe_adv_tx_context_desc {
225 __le32 vlan_macip_lens;
226 __le32 seqnum_seed;
227 __le32 type_tucmd_mlhl;
228 __le32 mss_l4len_idx;
229};
230
231/* Adv Transmit Descriptor Config Masks */
232#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
233#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
234#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
235#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
236#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
237#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
238#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
239#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
240#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
241#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
242#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
243#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
244#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
245#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
246#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
247#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
248#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
249#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
250 IXGBE_ADVTXD_POPTS_SHIFT)
251#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
252 IXGBE_ADVTXD_POPTS_SHIFT)
253#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
254#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
255#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
256#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
257#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
258
259/* Interrupt register bitmasks */
260
261/* Extended Interrupt Cause Read */
262#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
263#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
264#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
265
266/* Extended Interrupt Cause Set */
267#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
268#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
269#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
270
271/* Extended Interrupt Mask Set */
272#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
273#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
274#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
275
276/* Extended Interrupt Mask Clear */
277#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
278#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
279#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
280
281#define IXGBE_EIMS_ENABLE_MASK ( \
282 IXGBE_EIMS_RTX_QUEUE | \
283 IXGBE_EIMS_MAILBOX | \
284 IXGBE_EIMS_OTHER)
285
286#define IXGBE_EITR_CNT_WDIS 0x80000000
287
288/* Error Codes */
289#define IXGBE_ERR_INVALID_MAC_ADDR -1
290#define IXGBE_ERR_RESET_FAILED -2
291
292#endif /* _IXGBEVF_DEFINES_H_ */
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
new file mode 100644
index 000000000000..399be0c34c36
--- /dev/null
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -0,0 +1,716 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28/* ethtool support for ixgbevf */
29
30#include <linux/types.h>
31#include <linux/module.h>
32#include <linux/pci.h>
33#include <linux/netdevice.h>
34#include <linux/ethtool.h>
35#include <linux/vmalloc.h>
36#include <linux/if_vlan.h>
37#include <linux/uaccess.h>
38
39#include "ixgbevf.h"
40
41#define IXGBE_ALL_RAR_ENTRIES 16
42
43#ifdef ETHTOOL_GSTATS
44struct ixgbe_stats {
45 char stat_string[ETH_GSTRING_LEN];
46 int sizeof_stat;
47 int stat_offset;
48 int base_stat_offset;
49};
50
51#define IXGBEVF_STAT(m, b) sizeof(((struct ixgbevf_adapter *)0)->m), \
52 offsetof(struct ixgbevf_adapter, m), \
53 offsetof(struct ixgbevf_adapter, b)
54static struct ixgbe_stats ixgbe_gstrings_stats[] = {
55 {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc)},
56 {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc)},
57 {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc)},
58 {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc)},
59 {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base)},
60 {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc)},
61 {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base)},
62 {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base)},
63 {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base)},
64 {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base)},
65};
66
67#define IXGBE_QUEUE_STATS_LEN 0
68#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
69
70#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
71#endif /* ETHTOOL_GSTATS */
72#ifdef ETHTOOL_TEST
73static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
74 "Register test (offline)",
75 "Link test (on/offline)"
76};
77#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
78#endif /* ETHTOOL_TEST */
79
80static int ixgbevf_get_settings(struct net_device *netdev,
81 struct ethtool_cmd *ecmd)
82{
83 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
84 struct ixgbe_hw *hw = &adapter->hw;
85 u32 link_speed = 0;
86 bool link_up;
87
88 ecmd->supported = SUPPORTED_10000baseT_Full;
89 ecmd->autoneg = AUTONEG_DISABLE;
90 ecmd->transceiver = XCVR_DUMMY1;
91 ecmd->port = -1;
92
93 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
94
95 if (link_up) {
96 ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
97 SPEED_10000 : SPEED_1000;
98 ecmd->duplex = DUPLEX_FULL;
99 } else {
100 ecmd->speed = -1;
101 ecmd->duplex = -1;
102 }
103
104 return 0;
105}
106
107static u32 ixgbevf_get_rx_csum(struct net_device *netdev)
108{
109 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
110 return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
111}
112
113static int ixgbevf_set_rx_csum(struct net_device *netdev, u32 data)
114{
115 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
116 if (data)
117 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
118 else
119 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
120
121 if (netif_running(netdev)) {
122 if (!adapter->dev_closed)
123 ixgbevf_reinit_locked(adapter);
124 } else {
125 ixgbevf_reset(adapter);
126 }
127
128 return 0;
129}
130
131static int ixgbevf_set_tso(struct net_device *netdev, u32 data)
132{
133 if (data) {
134 netdev->features |= NETIF_F_TSO;
135 netdev->features |= NETIF_F_TSO6;
136 } else {
137 netif_tx_stop_all_queues(netdev);
138 netdev->features &= ~NETIF_F_TSO;
139 netdev->features &= ~NETIF_F_TSO6;
140 netif_tx_start_all_queues(netdev);
141 }
142 return 0;
143}
144
145static u32 ixgbevf_get_msglevel(struct net_device *netdev)
146{
147 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
148 return adapter->msg_enable;
149}
150
151static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
152{
153 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
154 adapter->msg_enable = data;
155}
156
157#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
158
159static char *ixgbevf_reg_names[] = {
160 "IXGBE_VFCTRL",
161 "IXGBE_VFSTATUS",
162 "IXGBE_VFLINKS",
163 "IXGBE_VFRXMEMWRAP",
164 "IXGBE_VFRTIMER",
165 "IXGBE_VTEICR",
166 "IXGBE_VTEICS",
167 "IXGBE_VTEIMS",
168 "IXGBE_VTEIMC",
169 "IXGBE_VTEIAC",
170 "IXGBE_VTEIAM",
171 "IXGBE_VTEITR",
172 "IXGBE_VTIVAR",
173 "IXGBE_VTIVAR_MISC",
174 "IXGBE_VFRDBAL0",
175 "IXGBE_VFRDBAL1",
176 "IXGBE_VFRDBAH0",
177 "IXGBE_VFRDBAH1",
178 "IXGBE_VFRDLEN0",
179 "IXGBE_VFRDLEN1",
180 "IXGBE_VFRDH0",
181 "IXGBE_VFRDH1",
182 "IXGBE_VFRDT0",
183 "IXGBE_VFRDT1",
184 "IXGBE_VFRXDCTL0",
185 "IXGBE_VFRXDCTL1",
186 "IXGBE_VFSRRCTL0",
187 "IXGBE_VFSRRCTL1",
188 "IXGBE_VFPSRTYPE",
189 "IXGBE_VFTDBAL0",
190 "IXGBE_VFTDBAL1",
191 "IXGBE_VFTDBAH0",
192 "IXGBE_VFTDBAH1",
193 "IXGBE_VFTDLEN0",
194 "IXGBE_VFTDLEN1",
195 "IXGBE_VFTDH0",
196 "IXGBE_VFTDH1",
197 "IXGBE_VFTDT0",
198 "IXGBE_VFTDT1",
199 "IXGBE_VFTXDCTL0",
200 "IXGBE_VFTXDCTL1",
201 "IXGBE_VFTDWBAL0",
202 "IXGBE_VFTDWBAL1",
203 "IXGBE_VFTDWBAH0",
204 "IXGBE_VFTDWBAH1"
205};
206
207
208static int ixgbevf_get_regs_len(struct net_device *netdev)
209{
210 return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
211}
212
/**
 * ixgbevf_get_regs - fill a register dump for ethtool -d
 * @netdev: network interface device structure
 * @regs: ethtool register dump header; ->version is filled in here
 * @p: caller-supplied buffer, at least ixgbevf_get_regs_len() bytes
 *
 * Snapshots the VF registers into @p, one u32 per slot, in the same
 * order as the ixgbevf_reg_names[] table, then echoes each value to the
 * debug log.
 */
static void ixgbevf_get_regs(struct net_device *netdev,
			     struct ethtool_regs *regs,
			     void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u32 regs_len = ixgbevf_get_regs_len(netdev);
	u8 i;

	memset(p, 0, regs_len);

	/* version layout: 0x01 | PCI revision id | PCI device id */
	regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFRTIMER);

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* slot 6 (named VTEICR) is deliberately filled from EICS too,
	 * for the reason above */
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);

	/* Receive DMA (two queues per register family) */
	for (i = 0; i < 2; i++)
		regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
	for (i = 0; i < 2; i++)
		regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
	for (i = 0; i < 2; i++)
		regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
	for (i = 0; i < 2; i++)
		regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
	for (i = 0; i < 2; i++)
		regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
	for (i = 0; i < 2; i++)
		regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));

	/* Receive */
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);

	/* Transmit */
	for (i = 0; i < 2; i++)
		regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
	for (i = 0; i < 2; i++)
		regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
	for (i = 0; i < 2; i++)
		regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
	for (i = 0; i < 2; i++)
		regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
	for (i = 0; i < 2; i++)
		regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
	for (i = 0; i < 2; i++)
		regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));

	/* mirror the whole dump to the debug log */
	for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
		hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
}
287
288static void ixgbevf_get_drvinfo(struct net_device *netdev,
289 struct ethtool_drvinfo *drvinfo)
290{
291 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
292
293 strlcpy(drvinfo->driver, ixgbevf_driver_name, 32);
294 strlcpy(drvinfo->version, ixgbevf_driver_version, 32);
295
296 strlcpy(drvinfo->fw_version, "N/A", 4);
297 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
298}
299
300static void ixgbevf_get_ringparam(struct net_device *netdev,
301 struct ethtool_ringparam *ring)
302{
303 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
304 struct ixgbevf_ring *tx_ring = adapter->tx_ring;
305 struct ixgbevf_ring *rx_ring = adapter->rx_ring;
306
307 ring->rx_max_pending = IXGBEVF_MAX_RXD;
308 ring->tx_max_pending = IXGBEVF_MAX_TXD;
309 ring->rx_mini_max_pending = 0;
310 ring->rx_jumbo_max_pending = 0;
311 ring->rx_pending = rx_ring->count;
312 ring->tx_pending = tx_ring->count;
313 ring->rx_mini_pending = 0;
314 ring->rx_jumbo_pending = 0;
315}
316
317static int ixgbevf_set_ringparam(struct net_device *netdev,
318 struct ethtool_ringparam *ring)
319{
320 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
321 struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
322 int i, err;
323 u32 new_rx_count, new_tx_count;
324 bool need_tx_update = false;
325 bool need_rx_update = false;
326
327 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
328 return -EINVAL;
329
330 new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
331 new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
332 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
333
334 new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
335 new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
336 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
337
338 if ((new_tx_count == adapter->tx_ring->count) &&
339 (new_rx_count == adapter->rx_ring->count)) {
340 /* nothing to do */
341 return 0;
342 }
343
344 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
345 msleep(1);
346
347 if (new_tx_count != adapter->tx_ring_count) {
348 tx_ring = kcalloc(adapter->num_tx_queues,
349 sizeof(struct ixgbevf_ring), GFP_KERNEL);
350 if (!tx_ring) {
351 err = -ENOMEM;
352 goto err_setup;
353 }
354 memcpy(tx_ring, adapter->tx_ring,
355 adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
356 for (i = 0; i < adapter->num_tx_queues; i++) {
357 tx_ring[i].count = new_tx_count;
358 err = ixgbevf_setup_tx_resources(adapter,
359 &tx_ring[i]);
360 if (err) {
361 while (i) {
362 i--;
363 ixgbevf_free_tx_resources(adapter,
364 &tx_ring[i]);
365 }
366 kfree(tx_ring);
367 goto err_setup;
368 }
369 tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
370 }
371 need_tx_update = true;
372 }
373
374 if (new_rx_count != adapter->rx_ring_count) {
375 rx_ring = kcalloc(adapter->num_rx_queues,
376 sizeof(struct ixgbevf_ring), GFP_KERNEL);
377 if ((!rx_ring) && (need_tx_update)) {
378 err = -ENOMEM;
379 goto err_rx_setup;
380 }
381 memcpy(rx_ring, adapter->rx_ring,
382 adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
383 for (i = 0; i < adapter->num_rx_queues; i++) {
384 rx_ring[i].count = new_rx_count;
385 err = ixgbevf_setup_rx_resources(adapter,
386 &rx_ring[i]);
387 if (err) {
388 while (i) {
389 i--;
390 ixgbevf_free_rx_resources(adapter,
391 &rx_ring[i]);
392 }
393 kfree(rx_ring);
394 goto err_rx_setup;
395 }
396 rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
397 }
398 need_rx_update = true;
399 }
400
401err_rx_setup:
402 /* if rings need to be updated, here's the place to do it in one shot */
403 if (need_tx_update || need_rx_update) {
404 if (netif_running(netdev))
405 ixgbevf_down(adapter);
406 }
407
408 /* tx */
409 if (need_tx_update) {
410 kfree(adapter->tx_ring);
411 adapter->tx_ring = tx_ring;
412 tx_ring = NULL;
413 adapter->tx_ring_count = new_tx_count;
414 }
415
416 /* rx */
417 if (need_rx_update) {
418 kfree(adapter->rx_ring);
419 adapter->rx_ring = rx_ring;
420 rx_ring = NULL;
421 adapter->rx_ring_count = new_rx_count;
422 }
423
424 /* success! */
425 err = 0;
426 if (netif_running(netdev))
427 ixgbevf_up(adapter);
428
429err_setup:
430 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
431 return err;
432}
433
434static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
435{
436 switch (stringset) {
437 case ETH_SS_TEST:
438 return IXGBE_TEST_LEN;
439 case ETH_SS_STATS:
440 return IXGBE_GLOBAL_STATS_LEN;
441 default:
442 return -EINVAL;
443 }
444}
445
446static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
447 struct ethtool_stats *stats, u64 *data)
448{
449 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
450 int i;
451
452 ixgbevf_update_stats(adapter);
453 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
454 char *p = (char *)adapter +
455 ixgbe_gstrings_stats[i].stat_offset;
456 char *b = (char *)adapter +
457 ixgbe_gstrings_stats[i].base_stat_offset;
458 data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
459 sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
460 ((ixgbe_gstrings_stats[i].sizeof_stat ==
461 sizeof(u64)) ? *(u64 *)b : *(u32 *)b);
462 }
463}
464
465static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
466 u8 *data)
467{
468 char *p = (char *)data;
469 int i;
470
471 switch (stringset) {
472 case ETH_SS_TEST:
473 memcpy(data, *ixgbe_gstrings_test,
474 IXGBE_TEST_LEN * ETH_GSTRING_LEN);
475 break;
476 case ETH_SS_STATS:
477 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
478 memcpy(p, ixgbe_gstrings_stats[i].stat_string,
479 ETH_GSTRING_LEN);
480 p += ETH_GSTRING_LEN;
481 }
482 break;
483 }
484}
485
486static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
487{
488 struct ixgbe_hw *hw = &adapter->hw;
489 bool link_up;
490 u32 link_speed = 0;
491 *data = 0;
492
493 hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
494 if (!link_up)
495 *data = 1;
496
497 return *data;
498}
499
/* ethtool register test data: one table row per register (or register
 * array) exercised by ixgbevf_reg_test() */
struct ixgbevf_reg_test {
	u16 reg;	/* offset of the first register in the family */
	u8  array_len;	/* number of array elements to test */
	u8  test_type;	/* PATTERN_TEST, SET_READ_TEST, ... (see below) */
	u32 mask;	/* bits expected to read back */
	u32 write;	/* pattern/value written during the test */
};
508
509/* In the hardware, registers are laid out either singly, in arrays
510 * spaced 0x40 bytes apart, or in contiguous tables. We assume
511 * most tests take place on arrays or single registers (handled
512 * as a single-element array) and special-case the tables.
513 * Table tests are always pattern tests.
514 *
515 * We also make provision for some required setup steps by specifying
516 * registers to be written without any read-back testing.
517 */
518
519#define PATTERN_TEST 1
520#define SET_READ_TEST 2
521#define WRITE_NO_TEST 3
522#define TABLE32_TEST 4
523#define TABLE64_TEST_LO 5
524#define TABLE64_TEST_HI 6
525
/* default VF register test: walked in order by ixgbevf_reg_test() until
 * the all-zero terminator.  The RXDCTL rows enable the Rx queues before
 * the tail-pointer test and disable them again afterwards. */
static struct ixgbevf_reg_test reg_test_vf[] = {
	{ IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	/* terminator */
	{ 0, 0, 0, 0 }
};
539
/*
 * REG_PATTERN_TEST - write/read-back pattern test of one register.
 *
 * Expects `adapter` and `data` (u64 *) in the enclosing scope.  For each
 * pattern: save the register, write (pattern & W), read back and compare
 * against (pattern & W & M), then restore the saved value.  On mismatch
 * the failing register offset is stored in *data and the macro executes
 * `return 1` FROM THE ENCLOSING FUNCTION, so it may only be used inside
 * int-returning test functions.
 */
#define REG_PATTERN_TEST(R, M, W) \
{ \
	u32 pat, val, before; \
	const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
	for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
		before = readl(adapter->hw.hw_addr + R); \
		writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
		val = readl(adapter->hw.hw_addr + R); \
		if (val != (_test[pat] & W & M)) { \
			hw_dbg(&adapter->hw, \
			"pattern test reg %04X failed: got " \
			"0x%08X expected 0x%08X\n", \
			R, val, (_test[pat] & W & M)); \
			*data = R; \
			writel(before, adapter->hw.hw_addr + R); \
			return 1; \
		} \
		writel(before, adapter->hw.hw_addr + R); \
	} \
}
560
/*
 * REG_SET_AND_CHECK - write a value to a register and verify the masked
 * bits read back.  Same contract as REG_PATTERN_TEST: expects `adapter`
 * and `data` in scope, restores the original register value, and does
 * `return 1` from the enclosing function on mismatch.
 */
#define REG_SET_AND_CHECK(R, M, W) \
{ \
	u32 val, before; \
	before = readl(adapter->hw.hw_addr + R); \
	writel((W & M), (adapter->hw.hw_addr + R)); \
	val = readl(adapter->hw.hw_addr + R); \
	if ((W & M) != (val & M)) { \
		printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \
				 "expected 0x%08X\n", R, (val & M), (W & M)); \
		*data = R; \
		writel(before, (adapter->hw.hw_addr + R)); \
		return 1; \
	} \
	writel(before, (adapter->hw.hw_addr + R)); \
}
576
/**
 * ixgbevf_reg_test - exercise the VF registers with write/read-back tests
 * @adapter: board private structure
 * @data: set to 0 on success, or the offset of the failing register
 *
 * Walks reg_test_vf[] until the all-zero terminator.  NOTE: the
 * REG_PATTERN_TEST/REG_SET_AND_CHECK macros expand to statements that
 * `return 1` from THIS function on a mismatch after recording the
 * failing offset in *data.
 *
 * Returns 0 on success, 1 on the first failure.
 */
static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
{
	struct ixgbevf_reg_test *test;
	u32 i;

	test = reg_test_vf;

	/*
	 * Perform the register test, looping through the test table
	 * until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			/* array elements are spaced 0x40 apart; table
			 * entries are 4 (32-bit) or 8 (64-bit) apart */
			switch (test->test_type) {
			case PATTERN_TEST:
				REG_PATTERN_TEST(test->reg + (i * 0x40),
						 test->mask,
						 test->write);
				break;
			case SET_READ_TEST:
				REG_SET_AND_CHECK(test->reg + (i * 0x40),
						  test->mask,
						  test->write);
				break;
			case WRITE_NO_TEST:
				writel(test->write,
				       (adapter->hw.hw_addr + test->reg)
				       + (i * 0x40));
				break;
			case TABLE32_TEST:
				REG_PATTERN_TEST(test->reg + (i * 4),
						 test->mask,
						 test->write);
				break;
			case TABLE64_TEST_LO:
				REG_PATTERN_TEST(test->reg + (i * 8),
						 test->mask,
						 test->write);
				break;
			case TABLE64_TEST_HI:
				REG_PATTERN_TEST((test->reg + 4) + (i * 8),
						 test->mask,
						 test->write);
				break;
			}
		}
		test++;
	}

	*data = 0;
	return *data;
}
629
/**
 * ixgbevf_diag_test - run the ethtool self-test
 * @netdev: network interface device structure
 * @eth_test: test request; ETH_TEST_FL_FAILED is OR'ed in on any failure
 * @data: per-test results: data[0] = register test, data[1] = link test
 *
 * Offline mode closes a running interface (or resets an idle one), runs
 * the register test, resets the hardware and reopens the interface.
 * Online mode performs only the link test.
 */
static void ixgbevf_diag_test(struct net_device *netdev,
			      struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	set_bit(__IXGBEVF_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		hw_dbg(&adapter->hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result */
		if (ixgbevf_link_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);
		else
			ixgbevf_reset(adapter);

		hw_dbg(&adapter->hw, "register testing starting\n");
		if (ixgbevf_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbevf_reset(adapter);

		clear_bit(__IXGBEVF_TESTING, &adapter->state);
		if (if_running)
			dev_open(netdev);
	} else {
		hw_dbg(&adapter->hw, "online testing starting\n");
		/* Online tests */
		if (ixgbevf_link_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Online tests aren't run; pass by default */
		data[0] = 0;

		clear_bit(__IXGBEVF_TESTING, &adapter->state);
	}
	/* NOTE(review): fixed 4s delay — presumably to let the link settle
	 * after the reset before returning to user space; confirm */
	msleep_interruptible(4 * 1000);
}
675
676static int ixgbevf_nway_reset(struct net_device *netdev)
677{
678 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
679
680 if (netif_running(netdev)) {
681 if (!adapter->dev_closed)
682 ixgbevf_reinit_locked(adapter);
683 }
684
685 return 0;
686}
687
688static struct ethtool_ops ixgbevf_ethtool_ops = {
689 .get_settings = ixgbevf_get_settings,
690 .get_drvinfo = ixgbevf_get_drvinfo,
691 .get_regs_len = ixgbevf_get_regs_len,
692 .get_regs = ixgbevf_get_regs,
693 .nway_reset = ixgbevf_nway_reset,
694 .get_link = ethtool_op_get_link,
695 .get_ringparam = ixgbevf_get_ringparam,
696 .set_ringparam = ixgbevf_set_ringparam,
697 .get_rx_csum = ixgbevf_get_rx_csum,
698 .set_rx_csum = ixgbevf_set_rx_csum,
699 .get_tx_csum = ethtool_op_get_tx_csum,
700 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
701 .get_sg = ethtool_op_get_sg,
702 .set_sg = ethtool_op_set_sg,
703 .get_msglevel = ixgbevf_get_msglevel,
704 .set_msglevel = ixgbevf_set_msglevel,
705 .get_tso = ethtool_op_get_tso,
706 .set_tso = ixgbevf_set_tso,
707 .self_test = ixgbevf_diag_test,
708 .get_sset_count = ixgbevf_get_sset_count,
709 .get_strings = ixgbevf_get_strings,
710 .get_ethtool_stats = ixgbevf_get_ethtool_stats,
711};
712
/**
 * ixgbevf_set_ethtool_ops - attach the ixgbevf ethtool callback table
 * @netdev: network interface device structure
 */
void ixgbevf_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
}
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
new file mode 100644
index 000000000000..f7015efbff05
--- /dev/null
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -0,0 +1,318 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _IXGBEVF_H_
29#define _IXGBEVF_H_
30
31#include <linux/types.h>
32#include <linux/timer.h>
33#include <linux/io.h>
34#include <linux/netdevice.h>
35
36#include "vf.h"
37
/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer */
struct ixgbevf_tx_buffer {
	struct sk_buff *skb;		/* buffer being transmitted */
	dma_addr_t dma;			/* DMA handle; 0 when unmapped */
	unsigned long time_stamp;	/* jiffies when queued; read by the Tx hang check */
	u16 length;			/* mapped length in bytes, for unmap */
	u16 next_to_watch;		/* EOP descriptor index to poll for completion */
	u16 mapped_as_page;		/* nonzero: unmap with pci_unmap_page(), else pci_unmap_single() */
};
48
/* per-descriptor Rx buffer bookkeeping: an skb plus an optional page
 * fragment, each with its DMA mapping.  NOTE(review): page/page_dma/
 * page_offset presumably back packet-split receives (see the
 * IXGBE_FLAG_RX_PS_* flags) — confirm against the Rx path. */
struct ixgbevf_rx_buffer {
	struct sk_buff *skb;
	dma_addr_t dma;
	struct page *page;
	dma_addr_t page_dma;
	unsigned int page_offset;
};
56
/* one Tx or Rx descriptor ring and its software state */
struct ixgbevf_ring {
	struct ixgbevf_adapter *adapter;  /* backlink */
	void *desc;			/* descriptor ring memory */
	dma_addr_t dma;			/* phys. address of descriptor ring */
	unsigned int size;		/* length in bytes */
	unsigned int count;		/* amount of descriptors */
	unsigned int next_to_use;	/* next free descriptor slot */
	unsigned int next_to_clean;	/* next descriptor to reclaim */

	int queue_index; /* needed for multiqueue queue management */
	union {
		struct ixgbevf_tx_buffer *tx_buffer_info;
		struct ixgbevf_rx_buffer *rx_buffer_info;
	};

	/* BAR offsets of the ring's head/tail registers (read via
	 * hw->hw_addr + head/tail) */
	u16 head;
	u16 tail;

	/* byte/packet totals accumulated by the clean routines */
	unsigned int total_bytes;
	unsigned int total_packets;

	u16 reg_idx; /* holds the special value that gets the hardware register
		      * offset associated with this ring, which is different
		      * for DCB and RSS modes */

#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
	/* cpu for tx queue */
	int cpu;
#endif

	u64 v_idx; /* maps directly to the index for this ring in the hardware
		    * vector array, can also be used for finding the bit in EICR
		    * and friends that represents the vector for this ring */

	u16 work_limit;                /* max work per interrupt */
	u16 rx_buf_len;
};
94
/* indices into ixgbevf_adapter.ring_feature[] */
enum ixgbevf_ring_f_enum {
	RING_F_NONE = 0,
	RING_F_ARRAY_SIZE      /* must be last in enum set */
};

/* per-feature queue bookkeeping.  NOTE(review): mirrors the PF driver's
 * ring_feature struct (queue count + index mask) — confirm usage. */
struct ixgbevf_ring_feature {
	int indices;
	int mask;
};
104
105/* How many Rx Buffers do we bundle into one write to the hardware ? */
106#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
107
108#define MAX_RX_QUEUES 1
109#define MAX_TX_QUEUES 1
110
111#define IXGBEVF_DEFAULT_TXD 1024
112#define IXGBEVF_DEFAULT_RXD 512
113#define IXGBEVF_MAX_TXD 4096
114#define IXGBEVF_MIN_TXD 64
115#define IXGBEVF_MAX_RXD 4096
116#define IXGBEVF_MIN_RXD 64
117
118/* Supported Rx Buffer Sizes */
119#define IXGBEVF_RXBUFFER_64 64 /* Used for packet split */
120#define IXGBEVF_RXBUFFER_128 128 /* Used for packet split */
121#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
122#define IXGBEVF_RXBUFFER_2048 2048
123#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
124
125#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
126
127#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
128
129#define IXGBE_TX_FLAGS_CSUM (u32)(1)
130#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
131#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
132#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
133#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4)
134#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5)
135#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
136#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
137#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
138
/* MAX_MSIX_Q_VECTORS of these are allocated,
 * but we only use one per queue-specific vector.
 */
struct ixgbevf_q_vector {
	struct ixgbevf_adapter *adapter;	/* backlink */
	struct napi_struct napi;
	DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
	DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
	u8 rxr_count;     /* Rx ring count assigned to this vector */
	u8 txr_count;     /* Tx ring count assigned to this vector */
	/* per-direction interrupt throttle state (see the Interrupt
	 * Throttle Rate fields in ixgbevf_adapter) */
	u8 tx_itr;
	u8 rx_itr;
	u32 eitr;	  /* value programmed into the EITR register */
	int v_idx;	  /* vector index in list */
};
154
155/* Helper macros to switch between ints/sec and what the register uses.
156 * And yes, it's the same math going both ways. The lowest value
157 * supported by all of the ixgbe hardware is 8.
158 */
159#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
160 ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
161#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
162
163#define IXGBE_DESC_UNUSED(R) \
164 ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
165 (R)->next_to_clean - (R)->next_to_use - 1)
166
167#define IXGBE_RX_DESC_ADV(R, i) \
168 (&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
169#define IXGBE_TX_DESC_ADV(R, i) \
170 (&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
171#define IXGBE_TX_CTXTDESC_ADV(R, i) \
172 (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
173
174#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
175
176#define OTHER_VECTOR 1
177#define NON_Q_VECTORS (OTHER_VECTOR)
178
179#define MAX_MSIX_Q_VECTORS 2
180#define MAX_MSIX_COUNT 2
181
182#define MIN_MSIX_Q_VECTORS 2
183#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
184
/* board specific private data structure */
struct ixgbevf_adapter {
	struct timer_list watchdog_timer;
#ifdef NETIF_F_HW_VLAN_TX
	struct vlan_group *vlgrp;
#endif
	u16 bd_number;		/* board instance number */
	struct work_struct reset_task;
	struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
	/* NOTE(review): presumably per-vector IRQ names ("%s-..."),
	 * hence IFNAMSIZ plus a suffix — confirm against request_irq */
	char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];

	/* Interrupt Throttle Rate */
	u32 itr_setting;
	u16 eitr_low;
	u16 eitr_high;

	/* TX */
	struct ixgbevf_ring *tx_ring;	/* One per active queue */
	int num_tx_queues;
	u64 restart_queue;
	u64 hw_csum_tx_good;
	u64 lsc_int;
	u64 hw_tso_ctxt;
	u64 hw_tso6_ctxt;
	u32 tx_timeout_count;
	bool detect_tx_hung;	/* cleared by ixgbevf_check_tx_hang() */

	/* RX */
	struct ixgbevf_ring *rx_ring;	/* One per active queue */
	int num_rx_queues;
	int num_rx_pools;               /* == num_rx_queues in 82598 */
	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
	u64 hw_csum_rx_error;
	u64 hw_rx_no_dma_resources;
	u64 hw_csum_rx_good;
	u64 non_eop_descs;
	int num_msix_vectors;
	int max_msix_q_vectors;         /* true count of q_vectors for device */
	struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE];
	struct msix_entry *msix_entries;

	u64 rx_hdr_split;
	u32 alloc_rx_page_failed;
	u32 alloc_rx_buff_failed;

	/* Some features need tri-state capability,
	 * thus the additional *_CAPABLE flags.
	 */
	u32 flags;
#define IXGBE_FLAG_RX_CSUM_ENABLED              (u32)(1)
#define IXGBE_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 1)
#define IXGBE_FLAG_RX_PS_CAPABLE                (u32)(1 << 2)
#define IXGBE_FLAG_RX_PS_ENABLED                (u32)(1 << 3)
#define IXGBE_FLAG_IN_NETPOLL                   (u32)(1 << 4)
#define IXGBE_FLAG_IMIR_ENABLED                 (u32)(1 << 5)
#define IXGBE_FLAG_MQ_CAPABLE                   (u32)(1 << 6)
#define IXGBE_FLAG_NEED_LINK_UPDATE             (u32)(1 << 7)
#define IXGBE_FLAG_IN_WATCHDOG_TASK             (u32)(1 << 8)
	/* OS defined structs */
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct net_device_stats net_stats;

	/* structs defined in ixgbe_vf.h */
	struct ixgbe_hw hw;
	u16 msg_enable;
	struct ixgbevf_hw_stats stats;
	u64 zero_base;
	/* Interrupt Throttle Rate */
	u32 eitr_param;

	unsigned long state;	/* __IXGBEVF_* bits, see enum ixbgevf_state_t */
	u32 *config_space;
	u64 tx_busy;
	unsigned int tx_ring_count;	/* descriptor count requested via ethtool -G */
	unsigned int rx_ring_count;	/* descriptor count requested via ethtool -G */

	u32 link_speed;
	bool link_up;
	unsigned long link_check_timeout;

	struct work_struct watchdog_task;
	bool netdev_registered;
	bool dev_closed;
};
270
/* bits for ixgbevf_adapter.state (note: "ixbgevf" typo is in the
 * original tag name and kept for source compatibility) */
enum ixbgevf_state_t {
	__IXGBEVF_TESTING,
	__IXGBEVF_RESETTING,
	__IXGBEVF_DOWN
};

/* indices into ixgbevf_info_tbl[] / pci_device_id driver_data */
enum ixgbevf_boards {
	board_82599_vf,
};
280
281extern struct ixgbevf_info ixgbevf_vf_info;
282extern struct ixgbe_mac_operations ixgbevf_mbx_ops;
283
284/* needed by ethtool.c */
285extern char ixgbevf_driver_name[];
286extern const char ixgbevf_driver_version[];
287
288extern int ixgbevf_up(struct ixgbevf_adapter *adapter);
289extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
290extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
291extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
292extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
293extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
294 struct ixgbevf_ring *);
295extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
296 struct ixgbevf_ring *);
297extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
298 struct ixgbevf_ring *);
299extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
300 struct ixgbevf_ring *);
301extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
302
303#ifdef ETHTOOL_OPS_COMPAT
304extern int ethtool_ioctl(struct ifreq *ifr);
305
306#endif
307extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
308extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
309
310#ifdef DEBUG
311extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
312#define hw_dbg(hw, format, arg...) \
313 printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
314#else
315#define hw_dbg(hw, format, arg...) do {} while (0)
316#endif
317
318#endif /* _IXGBEVF_H_ */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
new file mode 100644
index 000000000000..235b5fd4b8d4
--- /dev/null
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -0,0 +1,3578 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28
29/******************************************************************************
30 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
31******************************************************************************/
32#include <linux/types.h>
33#include <linux/module.h>
34#include <linux/pci.h>
35#include <linux/netdevice.h>
36#include <linux/vmalloc.h>
37#include <linux/string.h>
38#include <linux/in.h>
39#include <linux/ip.h>
40#include <linux/tcp.h>
41#include <linux/ipv6.h>
42#include <net/checksum.h>
43#include <net/ip6_checksum.h>
44#include <linux/ethtool.h>
45#include <linux/if_vlan.h>
46
47#include "ixgbevf.h"
48
49char ixgbevf_driver_name[] = "ixgbevf";
50static const char ixgbevf_driver_string[] =
51 "Intel(R) 82599 Virtual Function";
52
53#define DRV_VERSION "1.0.0-k0"
54const char ixgbevf_driver_version[] = DRV_VERSION;
55static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
56
/* map board index (enum ixgbevf_boards) to its device-specific info */
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
	board_82599_vf},

	/* required last entry */
	{0, }
};
76MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
77
78MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
79MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
80MODULE_LICENSE("GPL");
81MODULE_VERSION(DRV_VERSION);
82
83#define DEFAULT_DEBUG_LEVEL_SHIFT 3
84
85/* forward decls */
86static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
87static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
88 u32 itr_reg);
89
/**
 * ixgbevf_release_rx_desc - hand newly filled Rx descriptors to the hardware
 * @hw: pointer to the hardware structure
 * @rx_ring: ring whose tail register is advanced
 * @val: new tail value for the VFRDT register
 */
static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}
103
104/*
105 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
106 * @adapter: pointer to adapter struct
107 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
108 * @queue: queue to map the corresponding interrupt to
109 * @msix_vector: the vector to map to the corresponding queue
110 *
111 */
112static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
113 u8 queue, u8 msix_vector)
114{
115 u32 ivar, index;
116 struct ixgbe_hw *hw = &adapter->hw;
117 if (direction == -1) {
118 /* other causes */
119 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
120 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
121 ivar &= ~0xFF;
122 ivar |= msix_vector;
123 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
124 } else {
125 /* tx or rx causes */
126 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
127 index = ((16 * (queue & 1)) + (8 * direction));
128 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
129 ivar &= ~(0xFF << index);
130 ivar |= (msix_vector << index);
131 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
132 }
133}
134
135static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
136 struct ixgbevf_tx_buffer
137 *tx_buffer_info)
138{
139 if (tx_buffer_info->dma) {
140 if (tx_buffer_info->mapped_as_page)
141 pci_unmap_page(adapter->pdev,
142 tx_buffer_info->dma,
143 tx_buffer_info->length,
144 PCI_DMA_TODEVICE);
145 else
146 pci_unmap_single(adapter->pdev,
147 tx_buffer_info->dma,
148 tx_buffer_info->length,
149 PCI_DMA_TODEVICE);
150 tx_buffer_info->dma = 0;
151 }
152 if (tx_buffer_info->skb) {
153 dev_kfree_skb_any(tx_buffer_info->skb);
154 tx_buffer_info->skb = NULL;
155 }
156 tx_buffer_info->time_stamp = 0;
157 /* tx_buffer_info must be completely set up in the transmit path */
158}
159
/**
 * ixgbevf_check_tx_hang - detect a stalled Tx unit
 * @adapter: board private structure
 * @tx_ring: ring to examine
 * @eop: index of the oldest outstanding end-of-packet descriptor
 *
 * Declares a hang when the hardware ring is non-empty (head != tail)
 * but the descriptor at @eop has not completed within HZ jiffies of its
 * time stamp.  Also clears adapter->detect_tx_hung, re-arming the check.
 *
 * Returns true if a hang was detected, false otherwise.
 */
static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
					 struct ixgbevf_ring *tx_ring,
					 unsigned int eop)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 head, tail;

	/* Detect a transmit hang in hardware, this serializes the
	 * check with the clearing of time_stamp and movement of eop */
	head = readl(hw->hw_addr + tx_ring->head);
	tail = readl(hw->hw_addr + tx_ring->tail);
	adapter->detect_tx_hung = false;
	if ((head != tail) &&
	    tx_ring->tx_buffer_info[eop].time_stamp &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
		/* detected Tx unit hang */
		union ixgbe_adv_tx_desc *tx_desc;
		/* NOTE(review): tx_desc is fetched but not referenced
		 * further — presumably kept for debugger inspection */
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
		printk(KERN_ERR "Detected Tx Unit Hang\n"
		       " Tx Queue <%d>\n"
		       " TDH, TDT <%x>, <%x>\n"
		       " next_to_use <%x>\n"
		       " next_to_clean <%x>\n"
		       "tx_buffer_info[next_to_clean]\n"
		       " time_stamp <%lx>\n"
		       " jiffies <%lx>\n",
		       tx_ring->queue_index,
		       head, tail,
		       tx_ring->next_to_use, eop,
		       tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
		return true;
	}

	return false;
}
195
196#define IXGBE_MAX_TXD_PWR 14
197#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
198
199/* Tx Descriptors needed, worst case */
200#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
201 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
202#ifdef MAX_SKB_FRAGS
203#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
204 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
205#else
206#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
207#endif
208
209static void ixgbevf_tx_timeout(struct net_device *netdev);
210
211/**
212 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
213 * @adapter: board private structure
214 * @tx_ring: tx ring to clean
215 **/
216static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
217 struct ixgbevf_ring *tx_ring)
218{
219 struct net_device *netdev = adapter->netdev;
220 struct ixgbe_hw *hw = &adapter->hw;
221 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
222 struct ixgbevf_tx_buffer *tx_buffer_info;
223 unsigned int i, eop, count = 0;
224 unsigned int total_bytes = 0, total_packets = 0;
225
226 i = tx_ring->next_to_clean;
227 eop = tx_ring->tx_buffer_info[i].next_to_watch;
228 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
229
230 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
231 (count < tx_ring->work_limit)) {
232 bool cleaned = false;
233 for ( ; !cleaned; count++) {
234 struct sk_buff *skb;
235 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
236 tx_buffer_info = &tx_ring->tx_buffer_info[i];
237 cleaned = (i == eop);
238 skb = tx_buffer_info->skb;
239
240 if (cleaned && skb) {
241 unsigned int segs, bytecount;
242
243 /* gso_segs is currently only valid for tcp */
244 segs = skb_shinfo(skb)->gso_segs ?: 1;
245 /* multiply data chunks by size of headers */
246 bytecount = ((segs - 1) * skb_headlen(skb)) +
247 skb->len;
248 total_packets += segs;
249 total_bytes += bytecount;
250 }
251
252 ixgbevf_unmap_and_free_tx_resource(adapter,
253 tx_buffer_info);
254
255 tx_desc->wb.status = 0;
256
257 i++;
258 if (i == tx_ring->count)
259 i = 0;
260 }
261
262 eop = tx_ring->tx_buffer_info[i].next_to_watch;
263 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
264 }
265
266 tx_ring->next_to_clean = i;
267
268#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
269 if (unlikely(count && netif_carrier_ok(netdev) &&
270 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
271 /* Make sure that anybody stopping the queue after this
272 * sees the new next_to_clean.
273 */
274 smp_mb();
275#ifdef HAVE_TX_MQ
276 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
277 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
278 netif_wake_subqueue(netdev, tx_ring->queue_index);
279 ++adapter->restart_queue;
280 }
281#else
282 if (netif_queue_stopped(netdev) &&
283 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
284 netif_wake_queue(netdev);
285 ++adapter->restart_queue;
286 }
287#endif
288 }
289
290 if (adapter->detect_tx_hung) {
291 if (ixgbevf_check_tx_hang(adapter, tx_ring, i)) {
292 /* schedule immediate reset if we believe we hung */
293 printk(KERN_INFO
294 "tx hang %d detected, resetting adapter\n",
295 adapter->tx_timeout_count + 1);
296 ixgbevf_tx_timeout(adapter->netdev);
297 }
298 }
299
300 /* re-arm the interrupt */
301 if ((count >= tx_ring->work_limit) &&
302 (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
303 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
304 }
305
306 tx_ring->total_bytes += total_bytes;
307 tx_ring->total_packets += total_packets;
308
309 adapter->net_stats.tx_bytes += total_bytes;
310 adapter->net_stats.tx_packets += total_packets;
311
312 return (count < tx_ring->work_limit);
313}
314
315/**
316 * ixgbevf_receive_skb - Send a completed packet up the stack
317 * @q_vector: structure containing interrupt and ring information
318 * @skb: packet to send up
319 * @status: hardware indication of status of receive
320 * @rx_ring: rx descriptor ring (for a specific queue) to setup
321 * @rx_desc: rx descriptor
322 **/
323static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
324 struct sk_buff *skb, u8 status,
325 struct ixgbevf_ring *ring,
326 union ixgbe_adv_rx_desc *rx_desc)
327{
328 struct ixgbevf_adapter *adapter = q_vector->adapter;
329 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
330 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
331 int ret;
332
333 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
334 if (adapter->vlgrp && is_vlan)
335 vlan_gro_receive(&q_vector->napi,
336 adapter->vlgrp,
337 tag, skb);
338 else
339 napi_gro_receive(&q_vector->napi, skb);
340 } else {
341 if (adapter->vlgrp && is_vlan)
342 ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
343 else
344 ret = netif_rx(skb);
345 }
346}
347
348/**
349 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
350 * @adapter: address of board private structure
351 * @status_err: hardware indication of status of receive
352 * @skb: skb currently being received and modified
353 **/
354static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
355 u32 status_err, struct sk_buff *skb)
356{
357 skb->ip_summed = CHECKSUM_NONE;
358
359 /* Rx csum disabled */
360 if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
361 return;
362
363 /* if IP and error */
364 if ((status_err & IXGBE_RXD_STAT_IPCS) &&
365 (status_err & IXGBE_RXDADV_ERR_IPE)) {
366 adapter->hw_csum_rx_error++;
367 return;
368 }
369
370 if (!(status_err & IXGBE_RXD_STAT_L4CS))
371 return;
372
373 if (status_err & IXGBE_RXDADV_ERR_TCPE) {
374 adapter->hw_csum_rx_error++;
375 return;
376 }
377
378 /* It must be a TCP or UDP packet with a valid checksum */
379 skb->ip_summed = CHECKSUM_UNNECESSARY;
380 adapter->hw_csum_rx_good++;
381}
382
383/**
384 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
385 * @adapter: address of board private structure
386 **/
387static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
388 struct ixgbevf_ring *rx_ring,
389 int cleaned_count)
390{
391 struct pci_dev *pdev = adapter->pdev;
392 union ixgbe_adv_rx_desc *rx_desc;
393 struct ixgbevf_rx_buffer *bi;
394 struct sk_buff *skb;
395 unsigned int i;
396 unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
397
398 i = rx_ring->next_to_use;
399 bi = &rx_ring->rx_buffer_info[i];
400
401 while (cleaned_count--) {
402 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
403
404 if (!bi->page_dma &&
405 (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
406 if (!bi->page) {
407 bi->page = netdev_alloc_page(adapter->netdev);
408 if (!bi->page) {
409 adapter->alloc_rx_page_failed++;
410 goto no_buffers;
411 }
412 bi->page_offset = 0;
413 } else {
414 /* use a half page if we're re-using */
415 bi->page_offset ^= (PAGE_SIZE / 2);
416 }
417
418 bi->page_dma = pci_map_page(pdev, bi->page,
419 bi->page_offset,
420 (PAGE_SIZE / 2),
421 PCI_DMA_FROMDEVICE);
422 }
423
424 skb = bi->skb;
425 if (!skb) {
426 skb = netdev_alloc_skb(adapter->netdev,
427 bufsz);
428
429 if (!skb) {
430 adapter->alloc_rx_buff_failed++;
431 goto no_buffers;
432 }
433
434 /*
435 * Make buffer alignment 2 beyond a 16 byte boundary
436 * this will result in a 16 byte aligned IP header after
437 * the 14 byte MAC header is removed
438 */
439 skb_reserve(skb, NET_IP_ALIGN);
440
441 bi->skb = skb;
442 }
443 if (!bi->dma) {
444 bi->dma = pci_map_single(pdev, skb->data,
445 rx_ring->rx_buf_len,
446 PCI_DMA_FROMDEVICE);
447 }
448 /* Refresh the desc even if buffer_addrs didn't change because
449 * each write-back erases this info. */
450 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
451 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
452 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
453 } else {
454 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
455 }
456
457 i++;
458 if (i == rx_ring->count)
459 i = 0;
460 bi = &rx_ring->rx_buffer_info[i];
461 }
462
463no_buffers:
464 if (rx_ring->next_to_use != i) {
465 rx_ring->next_to_use = i;
466 if (i-- == 0)
467 i = (rx_ring->count - 1);
468
469 ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
470 }
471}
472
473static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
474 u64 qmask)
475{
476 u32 mask;
477 struct ixgbe_hw *hw = &adapter->hw;
478
479 mask = (qmask & 0xFFFFFFFF);
480 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
481}
482
/* Fetch the (little-endian) header info field of a writeback rx descriptor. */
static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}
487
/* Fetch the (little-endian) packet info field of a writeback rx descriptor. */
static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}
492
/**
 * ixgbevf_clean_rx_irq - process completed rx descriptors
 * @q_vector: vector servicing this ring
 * @rx_ring: ring to clean
 * @work_done: running count of packets processed (incremented here)
 * @work_to_do: NAPI budget; processing stops once *work_done reaches it
 *
 * Walks descriptors whose DD (done) bit is set: unmaps buffers,
 * stitches together packet-split and multi-descriptor frames, drops
 * frames with hardware-reported errors, records checksum state, and
 * hands completed skbs to ixgbevf_receive_skb().  Used buffers are
 * replenished in batches.  Returns true if anything was cleaned.
 **/
static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *rx_ring,
				 int *work_done, int work_to_do)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			/* packet split: header length comes from the
			 * descriptor, payload length from upper.length */
			hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			/* clamp to the size of the header buffer */
			if (len > IXGBEVF_RX_HDR_SIZE)
				len = IXGBEVF_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}
		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 PCI_DMA_FROMDEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		if (upper_len) {
			/* attach the half-page payload as a page fragment */
			pci_unmap_page(pdev, rx_buffer_info->page_dma,
				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buffer_info->page,
					   rx_buffer_info->page_offset,
					   upper_len);

			/* keep the page for the other half only when we
			 * hold the sole reference */
			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			/* frame spans multiple descriptors: carry the
			 * partial skb over to the next entry */
			if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
				rx_buffer_info->skb = next_buffer->skb;
				rx_buffer_info->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			} else {
				skb->next = next_buffer->skb;
				skb->next->prev = skb;
			}
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(adapter, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb->len - skb->data_len;
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, adapter->netdev);

		ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
		adapter->netdev->last_rx = jiffies;

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;

	return cleaned;
}
645
646/**
647 * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
648 * @napi: napi struct with our devices info in it
649 * @budget: amount of work driver is allowed to do this pass, in packets
650 *
651 * This function is optimized for cleaning one queue only on a single
652 * q_vector!!!
653 **/
654static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
655{
656 struct ixgbevf_q_vector *q_vector =
657 container_of(napi, struct ixgbevf_q_vector, napi);
658 struct ixgbevf_adapter *adapter = q_vector->adapter;
659 struct ixgbevf_ring *rx_ring = NULL;
660 int work_done = 0;
661 long r_idx;
662
663 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
664 rx_ring = &(adapter->rx_ring[r_idx]);
665
666 ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
667
668 /* If all Rx work done, exit the polling mode */
669 if (work_done < budget) {
670 napi_complete(napi);
671 if (adapter->itr_setting & 1)
672 ixgbevf_set_itr_msix(q_vector);
673 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
674 ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
675 }
676
677 return work_done;
678}
679
680/**
681 * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
682 * @napi: napi struct with our devices info in it
683 * @budget: amount of work driver is allowed to do this pass, in packets
684 *
685 * This function will clean more than one rx queue associated with a
686 * q_vector.
687 **/
688static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
689{
690 struct ixgbevf_q_vector *q_vector =
691 container_of(napi, struct ixgbevf_q_vector, napi);
692 struct ixgbevf_adapter *adapter = q_vector->adapter;
693 struct ixgbevf_ring *rx_ring = NULL;
694 int work_done = 0, i;
695 long r_idx;
696 u64 enable_mask = 0;
697
698 /* attempt to distribute budget to each queue fairly, but don't allow
699 * the budget to go below 1 because we'll exit polling */
700 budget /= (q_vector->rxr_count ?: 1);
701 budget = max(budget, 1);
702 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
703 for (i = 0; i < q_vector->rxr_count; i++) {
704 rx_ring = &(adapter->rx_ring[r_idx]);
705 ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
706 enable_mask |= rx_ring->v_idx;
707 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
708 r_idx + 1);
709 }
710
711#ifndef HAVE_NETDEV_NAPI_LIST
712 if (!netif_running(adapter->netdev))
713 work_done = 0;
714
715#endif
716 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
717 rx_ring = &(adapter->rx_ring[r_idx]);
718
719 /* If all Rx work done, exit the polling mode */
720 if (work_done < budget) {
721 napi_complete(napi);
722 if (adapter->itr_setting & 1)
723 ixgbevf_set_itr_msix(q_vector);
724 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
725 ixgbevf_irq_enable_queues(adapter, enable_mask);
726 }
727
728 return work_done;
729}
730
731
732/**
733 * ixgbevf_configure_msix - Configure MSI-X hardware
734 * @adapter: board private structure
735 *
736 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
737 * interrupts.
738 **/
739static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
740{
741 struct ixgbevf_q_vector *q_vector;
742 struct ixgbe_hw *hw = &adapter->hw;
743 int i, j, q_vectors, v_idx, r_idx;
744 u32 mask;
745
746 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
747
748 /*
749 * Populate the IVAR table and set the ITR values to the
750 * corresponding register.
751 */
752 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
753 q_vector = adapter->q_vector[v_idx];
754 /* XXX for_each_bit(...) */
755 r_idx = find_first_bit(q_vector->rxr_idx,
756 adapter->num_rx_queues);
757
758 for (i = 0; i < q_vector->rxr_count; i++) {
759 j = adapter->rx_ring[r_idx].reg_idx;
760 ixgbevf_set_ivar(adapter, 0, j, v_idx);
761 r_idx = find_next_bit(q_vector->rxr_idx,
762 adapter->num_rx_queues,
763 r_idx + 1);
764 }
765 r_idx = find_first_bit(q_vector->txr_idx,
766 adapter->num_tx_queues);
767
768 for (i = 0; i < q_vector->txr_count; i++) {
769 j = adapter->tx_ring[r_idx].reg_idx;
770 ixgbevf_set_ivar(adapter, 1, j, v_idx);
771 r_idx = find_next_bit(q_vector->txr_idx,
772 adapter->num_tx_queues,
773 r_idx + 1);
774 }
775
776 /* if this is a tx only vector halve the interrupt rate */
777 if (q_vector->txr_count && !q_vector->rxr_count)
778 q_vector->eitr = (adapter->eitr_param >> 1);
779 else if (q_vector->rxr_count)
780 /* rx only */
781 q_vector->eitr = adapter->eitr_param;
782
783 ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
784 }
785
786 ixgbevf_set_ivar(adapter, -1, 1, v_idx);
787
788 /* set up to autoclear timer, and the vectors */
789 mask = IXGBE_EIMS_ENABLE_MASK;
790 mask &= ~IXGBE_EIMS_OTHER;
791 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
792}
793
/* Latency buckets used by the adaptive interrupt-throttling code below. */
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
800
801/**
802 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
803 * @adapter: pointer to adapter
804 * @eitr: eitr setting (ints per sec) to give last timeslice
805 * @itr_setting: current throttle rate in ints/second
806 * @packets: the number of packets during this measurement interval
807 * @bytes: the number of bytes during this measurement interval
808 *
809 * Stores a new ITR value based on packets and byte
810 * counts during the last interrupt. The advantage of per interrupt
811 * computation is faster updates and more accurate ITR for the current
812 * traffic pattern. Constants in this function were computed
813 * based on theoretical maximum wire speed and thresholds were set based
814 * on testing data as well as attempting to minimize response time
815 * while increasing bulk throughput.
816 **/
817static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
818 u32 eitr, u8 itr_setting,
819 int packets, int bytes)
820{
821 unsigned int retval = itr_setting;
822 u32 timepassed_us;
823 u64 bytes_perint;
824
825 if (packets == 0)
826 goto update_itr_done;
827
828
829 /* simple throttlerate management
830 * 0-20MB/s lowest (100000 ints/s)
831 * 20-100MB/s low (20000 ints/s)
832 * 100-1249MB/s bulk (8000 ints/s)
833 */
834 /* what was last interrupt timeslice? */
835 timepassed_us = 1000000/eitr;
836 bytes_perint = bytes / timepassed_us; /* bytes/usec */
837
838 switch (itr_setting) {
839 case lowest_latency:
840 if (bytes_perint > adapter->eitr_low)
841 retval = low_latency;
842 break;
843 case low_latency:
844 if (bytes_perint > adapter->eitr_high)
845 retval = bulk_latency;
846 else if (bytes_perint <= adapter->eitr_low)
847 retval = lowest_latency;
848 break;
849 case bulk_latency:
850 if (bytes_perint <= adapter->eitr_high)
851 retval = low_latency;
852 break;
853 }
854
855update_itr_done:
856 return retval;
857}
858
859/**
860 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
861 * @adapter: pointer to adapter struct
862 * @v_idx: vector index into q_vector array
863 * @itr_reg: new value to be written in *register* format, not ints/s
864 *
865 * This function is made to be called by ethtool and by the driver
866 * when it needs to update VTEITR registers at runtime. Hardware
867 * specific quirks/differences are taken care of here.
868 */
869static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
870 u32 itr_reg)
871{
872 struct ixgbe_hw *hw = &adapter->hw;
873
874 itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);
875
876 /*
877 * set the WDIS bit to not clear the timer bits and cause an
878 * immediate assertion of the interrupt
879 */
880 itr_reg |= IXGBE_EITR_CNT_WDIS;
881
882 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
883}
884
885static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
886{
887 struct ixgbevf_adapter *adapter = q_vector->adapter;
888 u32 new_itr;
889 u8 current_itr, ret_itr;
890 int i, r_idx, v_idx = q_vector->v_idx;
891 struct ixgbevf_ring *rx_ring, *tx_ring;
892
893 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
894 for (i = 0; i < q_vector->txr_count; i++) {
895 tx_ring = &(adapter->tx_ring[r_idx]);
896 ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
897 q_vector->tx_itr,
898 tx_ring->total_packets,
899 tx_ring->total_bytes);
900 /* if the result for this queue would decrease interrupt
901 * rate for this vector then use that result */
902 q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
903 q_vector->tx_itr - 1 : ret_itr);
904 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
905 r_idx + 1);
906 }
907
908 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
909 for (i = 0; i < q_vector->rxr_count; i++) {
910 rx_ring = &(adapter->rx_ring[r_idx]);
911 ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
912 q_vector->rx_itr,
913 rx_ring->total_packets,
914 rx_ring->total_bytes);
915 /* if the result for this queue would decrease interrupt
916 * rate for this vector then use that result */
917 q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
918 q_vector->rx_itr - 1 : ret_itr);
919 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
920 r_idx + 1);
921 }
922
923 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
924
925 switch (current_itr) {
926 /* counts and packets in update_itr are dependent on these numbers */
927 case lowest_latency:
928 new_itr = 100000;
929 break;
930 case low_latency:
931 new_itr = 20000; /* aka hwitr = ~200 */
932 break;
933 case bulk_latency:
934 default:
935 new_itr = 8000;
936 break;
937 }
938
939 if (new_itr != q_vector->eitr) {
940 u32 itr_reg;
941
942 /* save the algorithm value here, not the smoothed one */
943 q_vector->eitr = new_itr;
944 /* do an exponential smoothing */
945 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
946 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
947 ixgbevf_write_eitr(adapter, v_idx, itr_reg);
948 }
949
950 return;
951}
952
953static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
954{
955 struct net_device *netdev = data;
956 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
957 struct ixgbe_hw *hw = &adapter->hw;
958 u32 eicr;
959 u32 msg;
960
961 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
962 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
963
964 hw->mbx.ops.read(hw, &msg, 1);
965
966 if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
967 mod_timer(&adapter->watchdog_timer,
968 round_jiffies(jiffies + 10));
969
970 return IRQ_HANDLED;
971}
972
973static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
974{
975 struct ixgbevf_q_vector *q_vector = data;
976 struct ixgbevf_adapter *adapter = q_vector->adapter;
977 struct ixgbevf_ring *tx_ring;
978 int i, r_idx;
979
980 if (!q_vector->txr_count)
981 return IRQ_HANDLED;
982
983 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
984 for (i = 0; i < q_vector->txr_count; i++) {
985 tx_ring = &(adapter->tx_ring[r_idx]);
986 tx_ring->total_bytes = 0;
987 tx_ring->total_packets = 0;
988 ixgbevf_clean_tx_irq(adapter, tx_ring);
989 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
990 r_idx + 1);
991 }
992
993 if (adapter->itr_setting & 1)
994 ixgbevf_set_itr_msix(q_vector);
995
996 return IRQ_HANDLED;
997}
998
999/**
1000 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
1001 * @irq: unused
1002 * @data: pointer to our q_vector struct for this interrupt vector
1003 **/
1004static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
1005{
1006 struct ixgbevf_q_vector *q_vector = data;
1007 struct ixgbevf_adapter *adapter = q_vector->adapter;
1008 struct ixgbe_hw *hw = &adapter->hw;
1009 struct ixgbevf_ring *rx_ring;
1010 int r_idx;
1011 int i;
1012
1013 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1014 for (i = 0; i < q_vector->rxr_count; i++) {
1015 rx_ring = &(adapter->rx_ring[r_idx]);
1016 rx_ring->total_bytes = 0;
1017 rx_ring->total_packets = 0;
1018 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1019 r_idx + 1);
1020 }
1021
1022 if (!q_vector->rxr_count)
1023 return IRQ_HANDLED;
1024
1025 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1026 rx_ring = &(adapter->rx_ring[r_idx]);
1027 /* disable interrupts on this vector only */
1028 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
1029 napi_schedule(&q_vector->napi);
1030
1031
1032 return IRQ_HANDLED;
1033}
1034
/* Combined handler for vectors that carry both rx and tx rings. */
static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
{
	ixgbevf_msix_clean_rx(irq, data);
	ixgbevf_msix_clean_tx(irq, data);

	return IRQ_HANDLED;
}
1042
1043static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
1044 int r_idx)
1045{
1046 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1047
1048 set_bit(r_idx, q_vector->rxr_idx);
1049 q_vector->rxr_count++;
1050 a->rx_ring[r_idx].v_idx = 1 << v_idx;
1051}
1052
1053static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
1054 int t_idx)
1055{
1056 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1057
1058 set_bit(t_idx, q_vector->txr_idx);
1059 q_vector->txr_count++;
1060 a->tx_ring[t_idx].v_idx = 1 << v_idx;
1061}
1062
1063/**
1064 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
1065 * @adapter: board private structure to initialize
1066 *
1067 * This function maps descriptor rings to the queue-specific vectors
1068 * we were allotted through the MSI-X enabling code. Ideally, we'd have
1069 * one vector per ring/queue, but on a constrained vector budget, we
1070 * group the rings as "efficiently" as possible. You would add new
1071 * mapping configurations in here.
1072 **/
1073static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
1074{
1075 int q_vectors;
1076 int v_start = 0;
1077 int rxr_idx = 0, txr_idx = 0;
1078 int rxr_remaining = adapter->num_rx_queues;
1079 int txr_remaining = adapter->num_tx_queues;
1080 int i, j;
1081 int rqpv, tqpv;
1082 int err = 0;
1083
1084 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1085
1086 /*
1087 * The ideal configuration...
1088 * We have enough vectors to map one per queue.
1089 */
1090 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
1091 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
1092 map_vector_to_rxq(adapter, v_start, rxr_idx);
1093
1094 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
1095 map_vector_to_txq(adapter, v_start, txr_idx);
1096 goto out;
1097 }
1098
1099 /*
1100 * If we don't have enough vectors for a 1-to-1
1101 * mapping, we'll have to group them so there are
1102 * multiple queues per vector.
1103 */
1104 /* Re-adjusting *qpv takes care of the remainder. */
1105 for (i = v_start; i < q_vectors; i++) {
1106 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
1107 for (j = 0; j < rqpv; j++) {
1108 map_vector_to_rxq(adapter, i, rxr_idx);
1109 rxr_idx++;
1110 rxr_remaining--;
1111 }
1112 }
1113 for (i = v_start; i < q_vectors; i++) {
1114 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
1115 for (j = 0; j < tqpv; j++) {
1116 map_vector_to_txq(adapter, i, txr_idx);
1117 txr_idx++;
1118 txr_remaining--;
1119 }
1120 }
1121
1122out:
1123 return err;
1124}
1125
1126/**
1127 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
1128 * @adapter: board private structure
1129 *
1130 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
1131 * interrupts from the kernel.
1132 **/
1133static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1134{
1135 struct net_device *netdev = adapter->netdev;
1136 irqreturn_t (*handler)(int, void *);
1137 int i, vector, q_vectors, err;
1138 int ri = 0, ti = 0;
1139
1140 /* Decrement for Other and TCP Timer vectors */
1141 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1142
1143#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
1144 ? &ixgbevf_msix_clean_many : \
1145 (_v)->rxr_count ? &ixgbevf_msix_clean_rx : \
1146 (_v)->txr_count ? &ixgbevf_msix_clean_tx : \
1147 NULL)
1148 for (vector = 0; vector < q_vectors; vector++) {
1149 handler = SET_HANDLER(adapter->q_vector[vector]);
1150
1151 if (handler == &ixgbevf_msix_clean_rx) {
1152 sprintf(adapter->name[vector], "%s-%s-%d",
1153 netdev->name, "rx", ri++);
1154 } else if (handler == &ixgbevf_msix_clean_tx) {
1155 sprintf(adapter->name[vector], "%s-%s-%d",
1156 netdev->name, "tx", ti++);
1157 } else if (handler == &ixgbevf_msix_clean_many) {
1158 sprintf(adapter->name[vector], "%s-%s-%d",
1159 netdev->name, "TxRx", vector);
1160 } else {
1161 /* skip this unused q_vector */
1162 continue;
1163 }
1164 err = request_irq(adapter->msix_entries[vector].vector,
1165 handler, 0, adapter->name[vector],
1166 adapter->q_vector[vector]);
1167 if (err) {
1168 hw_dbg(&adapter->hw,
1169 "request_irq failed for MSIX interrupt "
1170 "Error: %d\n", err);
1171 goto free_queue_irqs;
1172 }
1173 }
1174
1175 sprintf(adapter->name[vector], "%s:mbx", netdev->name);
1176 err = request_irq(adapter->msix_entries[vector].vector,
1177 &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
1178 if (err) {
1179 hw_dbg(&adapter->hw,
1180 "request_irq for msix_mbx failed: %d\n", err);
1181 goto free_queue_irqs;
1182 }
1183
1184 return 0;
1185
1186free_queue_irqs:
1187 for (i = vector - 1; i >= 0; i--)
1188 free_irq(adapter->msix_entries[--vector].vector,
1189 &(adapter->q_vector[i]));
1190 pci_disable_msix(adapter->pdev);
1191 kfree(adapter->msix_entries);
1192 adapter->msix_entries = NULL;
1193 return err;
1194}
1195
1196static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
1197{
1198 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1199
1200 for (i = 0; i < q_vectors; i++) {
1201 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
1202 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
1203 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
1204 q_vector->rxr_count = 0;
1205 q_vector->txr_count = 0;
1206 q_vector->eitr = adapter->eitr_param;
1207 }
1208}
1209
1210/**
1211 * ixgbevf_request_irq - initialize interrupts
1212 * @adapter: board private structure
1213 *
1214 * Attempts to configure interrupts using the best available
1215 * capabilities of the hardware and kernel.
1216 **/
1217static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1218{
1219 int err = 0;
1220
1221 err = ixgbevf_request_msix_irqs(adapter);
1222
1223 if (err)
1224 hw_dbg(&adapter->hw,
1225 "request_irq failed, Error %d\n", err);
1226
1227 return err;
1228}
1229
1230static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1231{
1232 struct net_device *netdev = adapter->netdev;
1233 int i, q_vectors;
1234
1235 q_vectors = adapter->num_msix_vectors;
1236
1237 i = q_vectors - 1;
1238
1239 free_irq(adapter->msix_entries[i].vector, netdev);
1240 i--;
1241
1242 for (; i >= 0; i--) {
1243 free_irq(adapter->msix_entries[i].vector,
1244 adapter->q_vector[i]);
1245 }
1246
1247 ixgbevf_reset_q_vectors(adapter);
1248}
1249
1250/**
1251 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1252 * @adapter: board private structure
1253 **/
1254static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1255{
1256 int i;
1257 struct ixgbe_hw *hw = &adapter->hw;
1258
1259 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1260
1261 IXGBE_WRITE_FLUSH(hw);
1262
1263 for (i = 0; i < adapter->num_msix_vectors; i++)
1264 synchronize_irq(adapter->msix_entries[i].vector);
1265}
1266
1267/**
1268 * ixgbevf_irq_enable - Enable default interrupt generation settings
1269 * @adapter: board private structure
1270 **/
1271static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
1272 bool queues, bool flush)
1273{
1274 struct ixgbe_hw *hw = &adapter->hw;
1275 u32 mask;
1276 u64 qmask;
1277
1278 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1279 qmask = ~0;
1280
1281 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1282
1283 if (queues)
1284 ixgbevf_irq_enable_queues(adapter, qmask);
1285
1286 if (flush)
1287 IXGBE_WRITE_FLUSH(hw);
1288}
1289
1290/**
1291 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1292 * @adapter: board private structure
1293 *
1294 * Configure the Tx unit of the MAC after a reset.
1295 **/
1296static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1297{
1298 u64 tdba;
1299 struct ixgbe_hw *hw = &adapter->hw;
1300 u32 i, j, tdlen, txctrl;
1301
1302 /* Setup the HW Tx Head and Tail descriptor pointers */
1303 for (i = 0; i < adapter->num_tx_queues; i++) {
1304 struct ixgbevf_ring *ring = &adapter->tx_ring[i];
1305 j = ring->reg_idx;
1306 tdba = ring->dma;
1307 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
1308 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1309 (tdba & DMA_BIT_MASK(32)));
1310 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1311 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
1312 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
1313 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
1314 adapter->tx_ring[i].head = IXGBE_VFTDH(j);
1315 adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
1316 /* Disable Tx Head Writeback RO bit, since this hoses
1317 * bookkeeping if things aren't delivered in order.
1318 */
1319 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1320 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1321 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1322 }
1323}
1324
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

/* ixgbevf_configure_srrctl - program the split receive control register
 * for one Rx ring: drop policy, descriptor type and buffer sizing for
 * either packet-split or single-buffer mode.
 */
static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	/* drop packets when no descriptors are available */
	srrctl = IXGBE_SRRCTL_DROP_EN;

	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		u16 bufsz = IXGBEVF_RXBUFFER_2048;
		/* grow the amount we can receive on large page machines */
		if (bufsz < (PAGE_SIZE / 2))
			bufsz = (PAGE_SIZE / 2);
		/* cap the bufsz at our largest descriptor size */
		bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);

		srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
			   IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
			   IXGBE_SRRCTL_BSIZEHDR_MASK);
	} else {
		/* single buffer per descriptor */
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
			srrctl |= IXGBEVF_RXBUFFER_2048 >>
				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		else
			srrctl |= rx_ring->rx_buf_len >>
				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
1362
1363/**
1364 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1365 * @adapter: board private structure
1366 *
1367 * Configure the Rx unit of the MAC after a reset.
1368 **/
1369static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1370{
1371 u64 rdba;
1372 struct ixgbe_hw *hw = &adapter->hw;
1373 struct net_device *netdev = adapter->netdev;
1374 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1375 int i, j;
1376 u32 rdlen;
1377 int rx_buf_len;
1378
1379 /* Decide whether to use packet split mode or not */
1380 if (netdev->mtu > ETH_DATA_LEN) {
1381 if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
1382 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
1383 else
1384 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
1385 } else {
1386 if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
1387 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
1388 else
1389 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
1390 }
1391
1392 /* Set the RX buffer length according to the mode */
1393 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1394 /* PSRTYPE must be initialized in 82599 */
1395 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
1396 IXGBE_PSRTYPE_UDPHDR |
1397 IXGBE_PSRTYPE_IPV4HDR |
1398 IXGBE_PSRTYPE_IPV6HDR |
1399 IXGBE_PSRTYPE_L2HDR;
1400 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1401 rx_buf_len = IXGBEVF_RX_HDR_SIZE;
1402 } else {
1403 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
1404 if (netdev->mtu <= ETH_DATA_LEN)
1405 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1406 else
1407 rx_buf_len = ALIGN(max_frame, 1024);
1408 }
1409
1410 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1411 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1412 * the Base and Length of the Rx Descriptor Ring */
1413 for (i = 0; i < adapter->num_rx_queues; i++) {
1414 rdba = adapter->rx_ring[i].dma;
1415 j = adapter->rx_ring[i].reg_idx;
1416 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1417 (rdba & DMA_BIT_MASK(32)));
1418 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1419 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
1420 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
1421 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
1422 adapter->rx_ring[i].head = IXGBE_VFRDH(j);
1423 adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
1424 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1425
1426 ixgbevf_configure_srrctl(adapter, j);
1427 }
1428}
1429
/* ixgbevf_vlan_rx_register - record the VLAN group and turn on VLAN
 * tag stripping (VME) on every Rx queue.
 */
static void ixgbevf_vlan_rx_register(struct net_device *netdev,
				     struct vlan_group *grp)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j;
	u32 ctrl;

	adapter->vlgrp = grp;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		j = adapter->rx_ring[i].reg_idx;
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), ctrl);
	}
}
1447
/* ixgbevf_vlan_rx_add_vid - add a VLAN id to the hardware filter table
 * (via the PF mailbox) and propagate feature flags to the vlan netdev.
 */
static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *v_netdev;

	/* add VID to filter table */
	if (hw->mac.ops.set_vfta)
		hw->mac.ops.set_vfta(hw, vid, 0, true);
	/*
	 * Copy feature flags from netdev to the vlan netdev for this vid.
	 * This allows things like TSO to bubble down to our vlan device.
	 */
	/* NOTE(review): assumes adapter->vlgrp is non-NULL and already has
	 * a device registered for this vid; a NULL v_netdev here would
	 * oops — confirm the 8021q caller ordering guarantees this. */
	v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
	v_netdev->features |= adapter->netdev->features;
	vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
}
1465
/* ixgbevf_vlan_rx_kill_vid - drop a VLAN id: detach the vlan device
 * under masked interrupts, then remove the id from the filter table.
 */
static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* quiesce interrupts while the group table is updated, unless
	 * the interface is already down */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_disable(adapter);

	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable(adapter, true, true);

	/* remove VID from filter table */
	if (hw->mac.ops.set_vfta)
		hw->mac.ops.set_vfta(hw, vid, 0, false);
}
1483
1484static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1485{
1486 ixgbevf_vlan_rx_register(adapter->netdev, adapter->vlgrp);
1487
1488 if (adapter->vlgrp) {
1489 u16 vid;
1490 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1491 if (!vlan_group_get_device(adapter->vlgrp, vid))
1492 continue;
1493 ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
1494 }
1495 }
1496}
1497
/* ixgbevf_addr_list_itr - multicast list iterator callback.
 * Given a pointer to the current entry's dmi_addr, recovers the
 * enclosing dev_mc_list with container_of, advances *mc_addr_ptr to
 * the next entry's address (or NULL at the end), and returns the
 * current address.  VMDq is unused on the VF, so *vmdq is always 0.
 */
static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
				 u32 *vmdq)
{
	struct dev_mc_list *mc_ptr;
	u8 *addr = *mc_addr_ptr;
	*vmdq = 0;

	mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
	if (mc_ptr->next)
		*mc_addr_ptr = mc_ptr->next->dmi_addr;
	else
		*mc_addr_ptr = NULL;

	return addr;
}
1513
1514/**
1515 * ixgbevf_set_rx_mode - Multicast set
1516 * @netdev: network interface device structure
1517 *
1518 * The set_rx_method entry point is called whenever the multicast address
1519 * list or the network interface flags are updated. This routine is
1520 * responsible for configuring the hardware for proper multicast mode.
1521 **/
1522static void ixgbevf_set_rx_mode(struct net_device *netdev)
1523{
1524 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1525 struct ixgbe_hw *hw = &adapter->hw;
1526 u8 *addr_list = NULL;
1527 int addr_count = 0;
1528
1529 /* reprogram multicast list */
1530 addr_count = netdev_mc_count(netdev);
1531 if (addr_count)
1532 addr_list = netdev->mc_list->dmi_addr;
1533 if (hw->mac.ops.update_mc_addr_list)
1534 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
1535 ixgbevf_addr_list_itr);
1536}
1537
/* ixgbevf_napi_enable_all - enable NAPI polling on every q_vector that
 * owns at least one Rx ring.
 */
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;
		q_vector = adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi = &q_vector->napi;
		/* vectors servicing several Rx rings need the multi-ring
		 * poll routine instead of the single-ring default */
		if (q_vector->rxr_count > 1)
			napi->poll = &ixgbevf_clean_rxonly_many;

		napi_enable(napi);
	}
}
1556
1557static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1558{
1559 int q_idx;
1560 struct ixgbevf_q_vector *q_vector;
1561 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1562
1563 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1564 q_vector = adapter->q_vector[q_idx];
1565 if (!q_vector->rxr_count)
1566 continue;
1567 napi_disable(&q_vector->napi);
1568 }
1569}
1570
/* ixgbevf_configure - program the device for operation: rx mode,
 * VLANs, Tx/Rx rings, and initial Rx buffer population.
 */
static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		/* fill the ring with buffers and publish them to HW by
		 * bumping the tail register */
		ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
		ring->next_to_use = ring->count - 1;
		writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
	}
}
1589
#define IXGBE_MAX_RX_DESC_POLL 10
/* ixgbevf_rx_desc_queue_enable - poll until the hardware reports the
 * Rx queue enabled (RXDCTL.ENABLE set), then release the ring's
 * descriptors to the hardware.  Gives up (with a debug message) after
 * IXGBE_MAX_RX_DESC_POLL attempts of ~1ms each.
 */
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
						int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int j = adapter->rx_ring[rxr].reg_idx;
	int k;

	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
			break;
		else
			msleep(1);
	}
	if (k >= IXGBE_MAX_RX_DESC_POLL) {
		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
		       "not set within the polling period\n", rxr);
	}

	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
				(adapter->rx_ring[rxr].count - 1));
}
1612
/* ixgbevf_up_complete - final stage of bringing the interface up:
 * enable Tx/Rx queues in hardware, program MSI-X, set the MAC filter,
 * start NAPI and the Tx queues, and kick the watchdog.
 * Returns 0 (kept non-void for the caller's error path convention).
 */
static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings = adapter->num_rx_queues;
	u32 txdctl, rxdctl;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	/* second pass: actually enable the Tx queues */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < num_rx_rings; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		/* wait for the queue to actually come up */
		ixgbevf_rx_desc_queue_enable(adapter, i);
	}

	ixgbevf_configure_msix(adapter);

	/* program the unicast MAC filter; fall back to the permanent
	 * address if the current one is invalid */
	if (hw->mac.ops.set_rar) {
		if (is_valid_ether_addr(hw->mac.addr))
			hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
		else
			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
	}

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	mod_timer(&adapter->watchdog_timer, jiffies);
	return 0;
}
1666
/* ixgbevf_up - bring the interface fully up: configure the device,
 * complete the bring-up sequence, then clear pending causes and enable
 * interrupts.  Returns 0 or a negative error from up_complete.
 */
int ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	int err;
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure(adapter);

	err = ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter, true, true);

	return err;
}
1683
1684/**
1685 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
1686 * @adapter: board private structure
1687 * @rx_ring: ring to free buffers from
1688 **/
1689static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1690 struct ixgbevf_ring *rx_ring)
1691{
1692 struct pci_dev *pdev = adapter->pdev;
1693 unsigned long size;
1694 unsigned int i;
1695
1696 if (!rx_ring->rx_buffer_info)
1697 return;
1698
1699 /* Free all the Rx ring sk_buffs */
1700 for (i = 0; i < rx_ring->count; i++) {
1701 struct ixgbevf_rx_buffer *rx_buffer_info;
1702
1703 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1704 if (rx_buffer_info->dma) {
1705 pci_unmap_single(pdev, rx_buffer_info->dma,
1706 rx_ring->rx_buf_len,
1707 PCI_DMA_FROMDEVICE);
1708 rx_buffer_info->dma = 0;
1709 }
1710 if (rx_buffer_info->skb) {
1711 struct sk_buff *skb = rx_buffer_info->skb;
1712 rx_buffer_info->skb = NULL;
1713 do {
1714 struct sk_buff *this = skb;
1715 skb = skb->prev;
1716 dev_kfree_skb(this);
1717 } while (skb);
1718 }
1719 if (!rx_buffer_info->page)
1720 continue;
1721 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
1722 PCI_DMA_FROMDEVICE);
1723 rx_buffer_info->page_dma = 0;
1724 put_page(rx_buffer_info->page);
1725 rx_buffer_info->page = NULL;
1726 rx_buffer_info->page_offset = 0;
1727 }
1728
1729 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1730 memset(rx_ring->rx_buffer_info, 0, size);
1731
1732 /* Zero out the descriptor ring */
1733 memset(rx_ring->desc, 0, rx_ring->size);
1734
1735 rx_ring->next_to_clean = 0;
1736 rx_ring->next_to_use = 0;
1737
1738 if (rx_ring->head)
1739 writel(0, adapter->hw.hw_addr + rx_ring->head);
1740 if (rx_ring->tail)
1741 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1742}
1743
1744/**
1745 * ixgbevf_clean_tx_ring - Free Tx Buffers
1746 * @adapter: board private structure
1747 * @tx_ring: ring to be cleaned
1748 **/
1749static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
1750 struct ixgbevf_ring *tx_ring)
1751{
1752 struct ixgbevf_tx_buffer *tx_buffer_info;
1753 unsigned long size;
1754 unsigned int i;
1755
1756 if (!tx_ring->tx_buffer_info)
1757 return;
1758
1759 /* Free all the Tx ring sk_buffs */
1760
1761 for (i = 0; i < tx_ring->count; i++) {
1762 tx_buffer_info = &tx_ring->tx_buffer_info[i];
1763 ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
1764 }
1765
1766 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
1767 memset(tx_ring->tx_buffer_info, 0, size);
1768
1769 memset(tx_ring->desc, 0, tx_ring->size);
1770
1771 tx_ring->next_to_use = 0;
1772 tx_ring->next_to_clean = 0;
1773
1774 if (tx_ring->head)
1775 writel(0, adapter->hw.hw_addr + tx_ring->head);
1776 if (tx_ring->tail)
1777 writel(0, adapter->hw.hw_addr + tx_ring->tail);
1778}
1779
1780/**
1781 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
1782 * @adapter: board private structure
1783 **/
1784static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
1785{
1786 int i;
1787
1788 for (i = 0; i < adapter->num_rx_queues; i++)
1789 ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1790}
1791
1792/**
1793 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
1794 * @adapter: board private structure
1795 **/
1796static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
1797{
1798 int i;
1799
1800 for (i = 0; i < adapter->num_tx_queues; i++)
1801 ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1802}
1803
/* ixgbevf_down - take the interface down in the required order:
 * mark DOWN, stop the stack's transmits, mask interrupts, stop NAPI
 * and the watchdog, disable Tx in hardware, reset, then purge rings.
 */
void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBEVF_DOWN, &adapter->state);
	/* disable receives */

	netif_tx_disable(netdev);

	/* allow in-flight transmits to drain */
	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
				(txdctl & ~IXGBE_TXDCTL_ENABLE));
	}

	netif_carrier_off(netdev);

	/* skip the reset if the PCI channel is gone (EEH/AER) */
	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}
1848
/* ixgbevf_reinit_locked - serialize a full down/up cycle behind the
 * __IXGBEVF_RESETTING bit; must be called from process context.
 */
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	WARN_ON(in_interrupt());

	/* spin until we own the RESETTING bit */
	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	/*
	 * Check if PF is up before re-init.  If not then skip until
	 * later when the PF is up and ready to service requests from
	 * the VF via mailbox.  If the VF is up and running then the
	 * watchdog task will continue to schedule reset tasks until
	 * the PF is up and running.
	 */
	if (!hw->mac.ops.reset_hw(hw)) {
		ixgbevf_down(adapter);
		ixgbevf_up(adapter);
	}

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}
1872
/* ixgbevf_reset - reset the VF via the PF mailbox and, on success,
 * re-init the hardware and refresh the netdev's MAC address.
 */
void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw))
		hw_dbg(hw, "PF still resetting\n");
	else
		hw->mac.ops.init_hw(hw);

	/* pick up the (possibly PF-assigned) MAC address */
	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}
1890
/* ixgbevf_acquire_msix_vectors - negotiate MSI-X vectors with the PCI
 * layer.  pci_enable_msix() returning a positive value means "try
 * again with at most this many"; retry downward until success or we
 * fall below the minimum.  On failure msix_entries is freed and set
 * to NULL, which callers use as the failure indicator.
 */
static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					 int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 * 3) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts? Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		hw_dbg(&adapter->hw,
		       "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = vectors;
	}
}
1937
1938/*
1939 * ixgbe_set_num_queues: Allocate queues for device, feature dependant
1940 * @adapter: board private structure to initialize
1941 *
1942 * This is the top level queue allocation routine. The order here is very
1943 * important, starting with the "most" number of features turned on at once,
1944 * and ending with the smallest set of features. This way large combinations
1945 * can be allocated if they're turned on, and smaller combinations are the
1946 * fallthrough conditions.
1947 *
1948 **/
1949static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1950{
1951 /* Start with base case */
1952 adapter->num_rx_queues = 1;
1953 adapter->num_tx_queues = 1;
1954 adapter->num_rx_pools = adapter->num_rx_queues;
1955 adapter->num_rx_queues_per_pool = 1;
1956}
1957
1958/**
1959 * ixgbevf_alloc_queues - Allocate memory for all rings
1960 * @adapter: board private structure to initialize
1961 *
1962 * We allocate one ring per queue at run-time since we don't know the
1963 * number of queues at compile-time. The polling_netdev array is
1964 * intended for Multiqueue, but should work fine with a single queue.
1965 **/
1966static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1967{
1968 int i;
1969
1970 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1971 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1972 if (!adapter->tx_ring)
1973 goto err_tx_ring_allocation;
1974
1975 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1976 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1977 if (!adapter->rx_ring)
1978 goto err_rx_ring_allocation;
1979
1980 for (i = 0; i < adapter->num_tx_queues; i++) {
1981 adapter->tx_ring[i].count = adapter->tx_ring_count;
1982 adapter->tx_ring[i].queue_index = i;
1983 adapter->tx_ring[i].reg_idx = i;
1984 }
1985
1986 for (i = 0; i < adapter->num_rx_queues; i++) {
1987 adapter->rx_ring[i].count = adapter->rx_ring_count;
1988 adapter->rx_ring[i].queue_index = i;
1989 adapter->rx_ring[i].reg_idx = i;
1990 }
1991
1992 return 0;
1993
1994err_rx_ring_allocation:
1995 kfree(adapter->tx_ring);
1996err_tx_ring_allocation:
1997 return -ENOMEM;
1998}
1999
2000/**
2001 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2002 * @adapter: board private structure to initialize
2003 *
2004 * Attempt to configure the interrupts using the best available
2005 * capabilities of the hardware and the kernel.
2006 **/
2007static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2008{
2009 int err = 0;
2010 int vector, v_budget;
2011
2012 /*
2013 * It's easy to be greedy for MSI-X vectors, but it really
2014 * doesn't do us much good if we have a lot more vectors
2015 * than CPU's. So let's be conservative and only ask for
2016 * (roughly) twice the number of vectors as there are CPU's.
2017 */
2018 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
2019 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
2020
2021 /* A failure in MSI-X entry allocation isn't fatal, but it does
2022 * mean we disable MSI-X capabilities of the adapter. */
2023 adapter->msix_entries = kcalloc(v_budget,
2024 sizeof(struct msix_entry), GFP_KERNEL);
2025 if (!adapter->msix_entries) {
2026 err = -ENOMEM;
2027 goto out;
2028 }
2029
2030 for (vector = 0; vector < v_budget; vector++)
2031 adapter->msix_entries[vector].entry = vector;
2032
2033 ixgbevf_acquire_msix_vectors(adapter, v_budget);
2034
2035out:
2036 return err;
2037}
2038
2039/**
2040 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2041 * @adapter: board private structure to initialize
2042 *
2043 * We allocate one q_vector per queue interrupt. If allocation fails we
2044 * return -ENOMEM.
2045 **/
2046static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2047{
2048 int q_idx, num_q_vectors;
2049 struct ixgbevf_q_vector *q_vector;
2050 int napi_vectors;
2051 int (*poll)(struct napi_struct *, int);
2052
2053 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2054 napi_vectors = adapter->num_rx_queues;
2055 poll = &ixgbevf_clean_rxonly;
2056
2057 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2058 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
2059 if (!q_vector)
2060 goto err_out;
2061 q_vector->adapter = adapter;
2062 q_vector->v_idx = q_idx;
2063 q_vector->eitr = adapter->eitr_param;
2064 if (q_idx < napi_vectors)
2065 netif_napi_add(adapter->netdev, &q_vector->napi,
2066 (*poll), 64);
2067 adapter->q_vector[q_idx] = q_vector;
2068 }
2069
2070 return 0;
2071
2072err_out:
2073 while (q_idx) {
2074 q_idx--;
2075 q_vector = adapter->q_vector[q_idx];
2076 netif_napi_del(&q_vector->napi);
2077 kfree(q_vector);
2078 adapter->q_vector[q_idx] = NULL;
2079 }
2080 return -ENOMEM;
2081}
2082
2083/**
2084 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2085 * @adapter: board private structure to initialize
2086 *
2087 * This function frees the memory allocated to the q_vectors. In addition if
2088 * NAPI is enabled it will delete any references to the NAPI struct prior
2089 * to freeing the q_vector.
2090 **/
2091static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2092{
2093 int q_idx, num_q_vectors;
2094 int napi_vectors;
2095
2096 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2097 napi_vectors = adapter->num_rx_queues;
2098
2099 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2100 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2101
2102 adapter->q_vector[q_idx] = NULL;
2103 if (q_idx < napi_vectors)
2104 netif_napi_del(&q_vector->napi);
2105 kfree(q_vector);
2106 }
2107}
2108
2109/**
2110 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2111 * @adapter: board private structure
2112 *
2113 **/
2114static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2115{
2116 pci_disable_msix(adapter->pdev);
2117 kfree(adapter->msix_entries);
2118 adapter->msix_entries = NULL;
2119
2120 return;
2121}
2122
2123/**
2124 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2125 * @adapter: board private structure to initialize
2126 *
2127 **/
2128static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2129{
2130 int err;
2131
2132 /* Number of supported queues */
2133 ixgbevf_set_num_queues(adapter);
2134
2135 err = ixgbevf_set_interrupt_capability(adapter);
2136 if (err) {
2137 hw_dbg(&adapter->hw,
2138 "Unable to setup interrupt capabilities\n");
2139 goto err_set_interrupt;
2140 }
2141
2142 err = ixgbevf_alloc_q_vectors(adapter);
2143 if (err) {
2144 hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
2145 "vectors\n");
2146 goto err_alloc_q_vectors;
2147 }
2148
2149 err = ixgbevf_alloc_queues(adapter);
2150 if (err) {
2151 printk(KERN_ERR "Unable to allocate memory for queues\n");
2152 goto err_alloc_queues;
2153 }
2154
2155 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
2156 "Tx Queue count = %u\n",
2157 (adapter->num_rx_queues > 1) ? "Enabled" :
2158 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2159
2160 set_bit(__IXGBEVF_DOWN, &adapter->state);
2161
2162 return 0;
2163err_alloc_queues:
2164 ixgbevf_free_q_vectors(adapter);
2165err_alloc_q_vectors:
2166 ixgbevf_reset_interrupt_capability(adapter);
2167err_set_interrupt:
2168 return err;
2169}
2170
2171/**
2172 * ixgbevf_sw_init - Initialize general software structures
2173 * (struct ixgbevf_adapter)
2174 * @adapter: board private structure to initialize
2175 *
2176 * ixgbevf_sw_init initializes the Adapter private data structure.
2177 * Fields are initialized based on PCI device information and
2178 * OS network device settings (MTU size).
2179 **/
2180static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2181{
2182 struct ixgbe_hw *hw = &adapter->hw;
2183 struct pci_dev *pdev = adapter->pdev;
2184 int err;
2185
2186 /* PCI config space info */
2187
2188 hw->vendor_id = pdev->vendor;
2189 hw->device_id = pdev->device;
2190 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
2191 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2192 hw->subsystem_device_id = pdev->subsystem_device;
2193
2194 hw->mbx.ops.init_params(hw);
2195 hw->mac.max_tx_queues = MAX_TX_QUEUES;
2196 hw->mac.max_rx_queues = MAX_RX_QUEUES;
2197 err = hw->mac.ops.reset_hw(hw);
2198 if (err) {
2199 dev_info(&pdev->dev,
2200 "PF still in reset state, assigning new address\n");
2201 random_ether_addr(hw->mac.addr);
2202 } else {
2203 err = hw->mac.ops.init_hw(hw);
2204 if (err) {
2205 printk(KERN_ERR "init_shared_code failed: %d\n", err);
2206 goto out;
2207 }
2208 }
2209
2210 /* Enable dynamic interrupt throttling rates */
2211 adapter->eitr_param = 20000;
2212 adapter->itr_setting = 1;
2213
2214 /* set defaults for eitr in MegaBytes */
2215 adapter->eitr_low = 10;
2216 adapter->eitr_high = 20;
2217
2218 /* set default ring sizes */
2219 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2220 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2221
2222 /* enable rx csum by default */
2223 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
2224
2225 set_bit(__IXGBEVF_DOWN, &adapter->state);
2226
2227out:
2228 return err;
2229}
2230
/* ixgbevf_init_last_counter_stats - snapshot the hardware statistics
 * registers.  VF counters are not cleared by reset, so both the
 * "last" values (for wrap detection) and the "base" values (subtracted
 * when reporting) start from the current register contents.
 */
static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	/* 36-bit octet counters are split across LSB/MSB registers */
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
2251
/* Accumulate a 32-bit hardware counter into a wider software counter.
 * If the register value went backwards since the last read, the
 * hardware counter wrapped, so carry 2^32 into the software total;
 * the low 32 bits are then replaced with the current register value.
 */
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

/* Same idea for the 36-bit counters that span an LSB/MSB register
 * pair: detect wrap of the 36-bit value and carry 2^36.
 */
#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}
2274/**
2275 * ixgbevf_update_stats - Update the board statistics counters.
2276 * @adapter: board private structure
2277 **/
2278void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2279{
2280 struct ixgbe_hw *hw = &adapter->hw;
2281
2282 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2283 adapter->stats.vfgprc);
2284 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2285 adapter->stats.vfgptc);
2286 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2287 adapter->stats.last_vfgorc,
2288 adapter->stats.vfgorc);
2289 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2290 adapter->stats.last_vfgotc,
2291 adapter->stats.vfgotc);
2292 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2293 adapter->stats.vfmprc);
2294
2295 /* Fill out the OS statistics structure */
2296 adapter->net_stats.multicast = adapter->stats.vfmprc -
2297 adapter->stats.base_vfmprc;
2298}
2299
2300/**
2301 * ixgbevf_watchdog - Timer Call-back
2302 * @data: pointer to adapter cast into an unsigned long
2303 **/
2304static void ixgbevf_watchdog(unsigned long data)
2305{
2306 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2307 struct ixgbe_hw *hw = &adapter->hw;
2308 u64 eics = 0;
2309 int i;
2310
2311 /*
2312 * Do the watchdog outside of interrupt context due to the lovely
2313 * delays that some of the newer hardware requires
2314 */
2315
2316 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2317 goto watchdog_short_circuit;
2318
2319 /* get one bit for every active tx/rx interrupt vector */
2320 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2321 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2322 if (qv->rxr_count || qv->txr_count)
2323 eics |= (1 << i);
2324 }
2325
2326 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);
2327
2328watchdog_short_circuit:
2329 schedule_work(&adapter->watchdog_task);
2330}
2331
2332/**
2333 * ixgbevf_tx_timeout - Respond to a Tx Hang
2334 * @netdev: network interface device structure
2335 **/
2336static void ixgbevf_tx_timeout(struct net_device *netdev)
2337{
2338 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2339
2340 /* Do the reset outside of interrupt context */
2341 schedule_work(&adapter->reset_task);
2342}
2343
2344static void ixgbevf_reset_task(struct work_struct *work)
2345{
2346 struct ixgbevf_adapter *adapter;
2347 adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2348
2349 /* If we're already down or resetting, just bail */
2350 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2351 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2352 return;
2353
2354 adapter->tx_timeout_count++;
2355
2356 ixgbevf_reinit_locked(adapter);
2357}
2358
2359/**
2360 * ixgbevf_watchdog_task - worker thread to bring link up
2361 * @work: pointer to work_struct containing our data
2362 **/
2363static void ixgbevf_watchdog_task(struct work_struct *work)
2364{
2365 struct ixgbevf_adapter *adapter = container_of(work,
2366 struct ixgbevf_adapter,
2367 watchdog_task);
2368 struct net_device *netdev = adapter->netdev;
2369 struct ixgbe_hw *hw = &adapter->hw;
2370 u32 link_speed = adapter->link_speed;
2371 bool link_up = adapter->link_up;
2372
2373 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2374
2375 /*
2376 * Always check the link on the watchdog because we have
2377 * no LSC interrupt
2378 */
2379 if (hw->mac.ops.check_link) {
2380 if ((hw->mac.ops.check_link(hw, &link_speed,
2381 &link_up, false)) != 0) {
2382 adapter->link_up = link_up;
2383 adapter->link_speed = link_speed;
2384 netif_carrier_off(netdev);
2385 netif_tx_stop_all_queues(netdev);
2386 schedule_work(&adapter->reset_task);
2387 goto pf_has_reset;
2388 }
2389 } else {
2390 /* always assume link is up, if no check link
2391 * function */
2392 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
2393 link_up = true;
2394 }
2395 adapter->link_up = link_up;
2396 adapter->link_speed = link_speed;
2397
2398 if (link_up) {
2399 if (!netif_carrier_ok(netdev)) {
2400 hw_dbg(&adapter->hw, "NIC Link is Up %s, ",
2401 ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2402 "10 Gbps" : "1 Gbps"));
2403 netif_carrier_on(netdev);
2404 netif_tx_wake_all_queues(netdev);
2405 } else {
2406 /* Force detection of hung controller */
2407 adapter->detect_tx_hung = true;
2408 }
2409 } else {
2410 adapter->link_up = false;
2411 adapter->link_speed = 0;
2412 if (netif_carrier_ok(netdev)) {
2413 hw_dbg(&adapter->hw, "NIC Link is Down\n");
2414 netif_carrier_off(netdev);
2415 netif_tx_stop_all_queues(netdev);
2416 }
2417 }
2418
2419pf_has_reset:
2420 ixgbevf_update_stats(adapter);
2421
2422 /* Force detection of hung controller every watchdog period */
2423 adapter->detect_tx_hung = true;
2424
2425 /* Reset the timer */
2426 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2427 mod_timer(&adapter->watchdog_timer,
2428 round_jiffies(jiffies + (2 * HZ)));
2429
2430 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2431}
2432
2433/**
2434 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2435 * @adapter: board private structure
2436 * @tx_ring: Tx descriptor ring for a specific queue
2437 *
2438 * Free all transmit software resources
2439 **/
2440void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2441 struct ixgbevf_ring *tx_ring)
2442{
2443 struct pci_dev *pdev = adapter->pdev;
2444
2445 ixgbevf_clean_tx_ring(adapter, tx_ring);
2446
2447 vfree(tx_ring->tx_buffer_info);
2448 tx_ring->tx_buffer_info = NULL;
2449
2450 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2451
2452 tx_ring->desc = NULL;
2453}
2454
2455/**
2456 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2457 * @adapter: board private structure
2458 *
2459 * Free all transmit software resources
2460 **/
2461static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2462{
2463 int i;
2464
2465 for (i = 0; i < adapter->num_tx_queues; i++)
2466 if (adapter->tx_ring[i].desc)
2467 ixgbevf_free_tx_resources(adapter,
2468 &adapter->tx_ring[i]);
2469
2470}
2471
2472/**
2473 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2474 * @adapter: board private structure
2475 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2476 *
2477 * Return 0 on success, negative on failure
2478 **/
2479int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2480 struct ixgbevf_ring *tx_ring)
2481{
2482 struct pci_dev *pdev = adapter->pdev;
2483 int size;
2484
2485 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2486 tx_ring->tx_buffer_info = vmalloc(size);
2487 if (!tx_ring->tx_buffer_info)
2488 goto err;
2489 memset(tx_ring->tx_buffer_info, 0, size);
2490
2491 /* round up to nearest 4K */
2492 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2493 tx_ring->size = ALIGN(tx_ring->size, 4096);
2494
2495 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
2496 &tx_ring->dma);
2497 if (!tx_ring->desc)
2498 goto err;
2499
2500 tx_ring->next_to_use = 0;
2501 tx_ring->next_to_clean = 0;
2502 tx_ring->work_limit = tx_ring->count;
2503 return 0;
2504
2505err:
2506 vfree(tx_ring->tx_buffer_info);
2507 tx_ring->tx_buffer_info = NULL;
2508 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2509 "descriptor ring\n");
2510 return -ENOMEM;
2511}
2512
2513/**
2514 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2515 * @adapter: board private structure
2516 *
2517 * If this function returns with an error, then it's possible one or
2518 * more of the rings is populated (while the rest are not). It is the
2519 * callers duty to clean those orphaned rings.
2520 *
2521 * Return 0 on success, negative on failure
2522 **/
2523static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2524{
2525 int i, err = 0;
2526
2527 for (i = 0; i < adapter->num_tx_queues; i++) {
2528 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2529 if (!err)
2530 continue;
2531 hw_dbg(&adapter->hw,
2532 "Allocation for Tx Queue %u failed\n", i);
2533 break;
2534 }
2535
2536 return err;
2537}
2538
2539/**
2540 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2541 * @adapter: board private structure
2542 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2543 *
2544 * Returns 0 on success, negative on failure
2545 **/
2546int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2547 struct ixgbevf_ring *rx_ring)
2548{
2549 struct pci_dev *pdev = adapter->pdev;
2550 int size;
2551
2552 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2553 rx_ring->rx_buffer_info = vmalloc(size);
2554 if (!rx_ring->rx_buffer_info) {
2555 hw_dbg(&adapter->hw,
2556 "Unable to vmalloc buffer memory for "
2557 "the receive descriptor ring\n");
2558 goto alloc_failed;
2559 }
2560 memset(rx_ring->rx_buffer_info, 0, size);
2561
2562 /* Round up to nearest 4K */
2563 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2564 rx_ring->size = ALIGN(rx_ring->size, 4096);
2565
2566 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
2567 &rx_ring->dma);
2568
2569 if (!rx_ring->desc) {
2570 hw_dbg(&adapter->hw,
2571 "Unable to allocate memory for "
2572 "the receive descriptor ring\n");
2573 vfree(rx_ring->rx_buffer_info);
2574 rx_ring->rx_buffer_info = NULL;
2575 goto alloc_failed;
2576 }
2577
2578 rx_ring->next_to_clean = 0;
2579 rx_ring->next_to_use = 0;
2580
2581 return 0;
2582alloc_failed:
2583 return -ENOMEM;
2584}
2585
2586/**
2587 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2588 * @adapter: board private structure
2589 *
2590 * If this function returns with an error, then it's possible one or
2591 * more of the rings is populated (while the rest are not). It is the
2592 * callers duty to clean those orphaned rings.
2593 *
2594 * Return 0 on success, negative on failure
2595 **/
2596static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2597{
2598 int i, err = 0;
2599
2600 for (i = 0; i < adapter->num_rx_queues; i++) {
2601 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2602 if (!err)
2603 continue;
2604 hw_dbg(&adapter->hw,
2605 "Allocation for Rx Queue %u failed\n", i);
2606 break;
2607 }
2608 return err;
2609}
2610
2611/**
2612 * ixgbevf_free_rx_resources - Free Rx Resources
2613 * @adapter: board private structure
2614 * @rx_ring: ring to clean the resources from
2615 *
2616 * Free all receive software resources
2617 **/
2618void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2619 struct ixgbevf_ring *rx_ring)
2620{
2621 struct pci_dev *pdev = adapter->pdev;
2622
2623 ixgbevf_clean_rx_ring(adapter, rx_ring);
2624
2625 vfree(rx_ring->rx_buffer_info);
2626 rx_ring->rx_buffer_info = NULL;
2627
2628 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2629
2630 rx_ring->desc = NULL;
2631}
2632
2633/**
2634 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2635 * @adapter: board private structure
2636 *
2637 * Free all receive software resources
2638 **/
2639static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2640{
2641 int i;
2642
2643 for (i = 0; i < adapter->num_rx_queues; i++)
2644 if (adapter->rx_ring[i].desc)
2645 ixgbevf_free_rx_resources(adapter,
2646 &adapter->rx_ring[i]);
2647}
2648
2649/**
2650 * ixgbevf_open - Called when a network interface is made active
2651 * @netdev: network interface device structure
2652 *
2653 * Returns 0 on success, negative value on failure
2654 *
2655 * The open entry point is called when a network interface is made
2656 * active by the system (IFF_UP). At this point all resources needed
2657 * for transmit and receive operations are allocated, the interrupt
2658 * handler is registered with the OS, the watchdog timer is started,
2659 * and the stack is notified that the interface is ready.
2660 **/
2661static int ixgbevf_open(struct net_device *netdev)
2662{
2663 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2664 struct ixgbe_hw *hw = &adapter->hw;
2665 int err;
2666
2667 /* disallow open during test */
2668 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2669 return -EBUSY;
2670
2671 if (hw->adapter_stopped) {
2672 ixgbevf_reset(adapter);
2673 /* if adapter is still stopped then PF isn't up and
2674 * the vf can't start. */
2675 if (hw->adapter_stopped) {
2676 err = IXGBE_ERR_MBX;
2677 printk(KERN_ERR "Unable to start - perhaps the PF"
2678 "Driver isn't up yet\n");
2679 goto err_setup_reset;
2680 }
2681 }
2682
2683 /* allocate transmit descriptors */
2684 err = ixgbevf_setup_all_tx_resources(adapter);
2685 if (err)
2686 goto err_setup_tx;
2687
2688 /* allocate receive descriptors */
2689 err = ixgbevf_setup_all_rx_resources(adapter);
2690 if (err)
2691 goto err_setup_rx;
2692
2693 ixgbevf_configure(adapter);
2694
2695 /*
2696 * Map the Tx/Rx rings to the vectors we were allotted.
2697 * if request_irq will be called in this function map_rings
2698 * must be called *before* up_complete
2699 */
2700 ixgbevf_map_rings_to_vectors(adapter);
2701
2702 err = ixgbevf_up_complete(adapter);
2703 if (err)
2704 goto err_up;
2705
2706 /* clear any pending interrupts, may auto mask */
2707 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2708 err = ixgbevf_request_irq(adapter);
2709 if (err)
2710 goto err_req_irq;
2711
2712 ixgbevf_irq_enable(adapter, true, true);
2713
2714 return 0;
2715
2716err_req_irq:
2717 ixgbevf_down(adapter);
2718err_up:
2719 ixgbevf_free_irq(adapter);
2720err_setup_rx:
2721 ixgbevf_free_all_rx_resources(adapter);
2722err_setup_tx:
2723 ixgbevf_free_all_tx_resources(adapter);
2724 ixgbevf_reset(adapter);
2725
2726err_setup_reset:
2727
2728 return err;
2729}
2730
2731/**
2732 * ixgbevf_close - Disables a network interface
2733 * @netdev: network interface device structure
2734 *
2735 * Returns 0, this is not allowed to fail
2736 *
2737 * The close entry point is called when an interface is de-activated
2738 * by the OS. The hardware is still under the drivers control, but
2739 * needs to be disabled. A global MAC reset is issued to stop the
2740 * hardware, and all transmit and receive resources are freed.
2741 **/
2742static int ixgbevf_close(struct net_device *netdev)
2743{
2744 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2745
2746 ixgbevf_down(adapter);
2747 ixgbevf_free_irq(adapter);
2748
2749 ixgbevf_free_all_tx_resources(adapter);
2750 ixgbevf_free_all_rx_resources(adapter);
2751
2752 return 0;
2753}
2754
/*
 * ixgbevf_tso - build a TSO context descriptor for a GSO packet
 * @adapter: board private structure
 * @tx_ring: ring the context descriptor is placed on
 * @skb: packet being transmitted
 * @tx_flags: IXGBE_TX_FLAGS_* gathered so far (VLAN tag etc.)
 * @hdr_len: out-param, incremented by the total header length consumed
 *
 * Returns true when a context descriptor was queued, false for non-GSO
 * packets, or a negative errno if un-cloning the header failed.  (The int
 * return mixes bool and errno; callers must test "< 0" before truthiness.)
 */
static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
		       struct ixgbevf_ring *tx_ring,
		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
	u32 mss_l4len_idx, l4len;

	if (skb_is_gso(skb)) {
		/* headers will be modified below, so make them private */
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len += l4len;

		if (skb->protocol == htons(ETH_P_IP)) {
			/*
			 * zero the length/checksum fields and seed the TCP
			 * pseudo-header checksum; hardware fills in the rest
			 * per segment
			 */
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			adapter->hw_tso_ctxt++;
		} else if (skb_is_gso_v6(skb)) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
			       ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						&ipv6_hdr(skb)->daddr,
						0, IPPROTO_TCP, 0);
			adapter->hw_tso6_ctxt++;
		}

		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
				(tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		vlan_macip_lens |=
			(skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
			(skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx =
			(skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		/* use index 1 for TSO */
		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		/* the context descriptor consumes one ring slot */
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
2842
/*
 * ixgbevf_tx_csum - build a checksum-offload context descriptor
 * @adapter: board private structure
 * @tx_ring: ring the context descriptor is placed on
 * @skb: packet being transmitted
 * @tx_flags: IXGBE_TX_FLAGS_* gathered so far (VLAN tag etc.)
 *
 * Queues a context descriptor when the packet needs L4 checksum offload
 * or carries a VLAN tag.  Returns true if a descriptor was queued.
 */
static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
			    struct ixgbevf_ring *tx_ring,
			    struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		/* VLAN tag plus MAC/IP header lengths */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |= (tx_flags &
					    IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
					    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			/* skb->protocol is in network byte order */
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					type_tucmd_mlhl |=
					    IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					type_tucmd_mlhl |=
						IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				/* unexpected protocol asked for offload:
				 * leave L4T unset and just log (ratelimited) */
				if (unlikely(net_ratelimit())) {
					printk(KERN_WARNING
					       "partial checksum but "
					       "proto=%x!\n",
					       skb->protocol);
				}
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		/* use index zero for tx checksum offload */
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		adapter->hw_csum_tx_good++;
		/* the context descriptor consumes one ring slot */
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
2916
2917static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
2918 struct ixgbevf_ring *tx_ring,
2919 struct sk_buff *skb, u32 tx_flags,
2920 unsigned int first)
2921{
2922 struct pci_dev *pdev = adapter->pdev;
2923 struct ixgbevf_tx_buffer *tx_buffer_info;
2924 unsigned int len;
2925 unsigned int total = skb->len;
2926 unsigned int offset = 0, size, count = 0, i;
2927 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2928 unsigned int f;
2929
2930 i = tx_ring->next_to_use;
2931
2932 len = min(skb_headlen(skb), total);
2933 while (len) {
2934 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2935 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2936
2937 tx_buffer_info->length = size;
2938 tx_buffer_info->mapped_as_page = false;
2939 tx_buffer_info->dma = pci_map_single(adapter->pdev,
2940 skb->data + offset,
2941 size, PCI_DMA_TODEVICE);
2942 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
2943 goto dma_error;
2944 tx_buffer_info->time_stamp = jiffies;
2945 tx_buffer_info->next_to_watch = i;
2946
2947 len -= size;
2948 total -= size;
2949 offset += size;
2950 count++;
2951 i++;
2952 if (i == tx_ring->count)
2953 i = 0;
2954 }
2955
2956 for (f = 0; f < nr_frags; f++) {
2957 struct skb_frag_struct *frag;
2958
2959 frag = &skb_shinfo(skb)->frags[f];
2960 len = min((unsigned int)frag->size, total);
2961 offset = frag->page_offset;
2962
2963 while (len) {
2964 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2965 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2966
2967 tx_buffer_info->length = size;
2968 tx_buffer_info->dma = pci_map_page(adapter->pdev,
2969 frag->page,
2970 offset,
2971 size,
2972 PCI_DMA_TODEVICE);
2973 tx_buffer_info->mapped_as_page = true;
2974 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
2975 goto dma_error;
2976 tx_buffer_info->time_stamp = jiffies;
2977 tx_buffer_info->next_to_watch = i;
2978
2979 len -= size;
2980 total -= size;
2981 offset += size;
2982 count++;
2983 i++;
2984 if (i == tx_ring->count)
2985 i = 0;
2986 }
2987 if (total == 0)
2988 break;
2989 }
2990
2991 if (i == 0)
2992 i = tx_ring->count - 1;
2993 else
2994 i = i - 1;
2995 tx_ring->tx_buffer_info[i].skb = skb;
2996 tx_ring->tx_buffer_info[first].next_to_watch = i;
2997
2998 return count;
2999
3000dma_error:
3001 dev_err(&pdev->dev, "TX DMA map failed\n");
3002
3003 /* clear timestamp and dma mappings for failed tx_buffer_info map */
3004 tx_buffer_info->dma = 0;
3005 tx_buffer_info->time_stamp = 0;
3006 tx_buffer_info->next_to_watch = 0;
3007 count--;
3008
3009 /* clear timestamp and dma mappings for remaining portion of packet */
3010 while (count >= 0) {
3011 count--;
3012 i--;
3013 if (i < 0)
3014 i += tx_ring->count;
3015 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3016 ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
3017 }
3018
3019 return count;
3020}
3021
/*
 * ixgbevf_tx_queue - write the data descriptors and kick the hardware
 * @adapter: board private structure
 * @tx_ring: ring the buffers were mapped on
 * @tx_flags: IXGBE_TX_FLAGS_* deciding VLAN/TSO/checksum options
 * @count: number of mapped buffers (must be >= 1; with 0 the trailing
 *         tx_desc dereference below would be a NULL access)
 * @paylen: total packet length
 * @hdr_len: header length to subtract for the payload-length field
 *
 * Fills one advanced data descriptor per mapped buffer, marks the last
 * one EOP/RS, then moves the tail pointer so hardware starts fetching.
 */
static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
			     struct ixgbevf_ring *tx_ring, int tx_flags,
			     int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;

	/* command bits that only go on the last descriptor of the packet */
	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
			IXGBE_ADVTXD_POPTS_SHIFT;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
				IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
			IXGBE_ADVTXD_POPTS_SHIFT;

	/* payload length excludes the headers consumed by TSO */
	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	/* tx_desc now points at the last descriptor written above */
	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
3084
3085static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
3086 struct ixgbevf_ring *tx_ring, int size)
3087{
3088 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3089
3090 netif_stop_subqueue(netdev, tx_ring->queue_index);
3091 /* Herbert's original patch had:
3092 * smp_mb__after_netif_stop_queue();
3093 * but since that doesn't exist yet, just open code it. */
3094 smp_mb();
3095
3096 /* We need to check again in a case another CPU has just
3097 * made room available. */
3098 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
3099 return -EBUSY;
3100
3101 /* A reprieve! - use start_queue because it doesn't call schedule */
3102 netif_start_subqueue(netdev, tx_ring->queue_index);
3103 ++adapter->restart_queue;
3104 return 0;
3105}
3106
/*
 * ixgbevf_maybe_stop_tx - fast path check for descriptor availability.
 * Returns 0 when 'size' descriptors are free, otherwise falls through
 * to the stop-and-recheck slow path.
 */
static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
				 struct ixgbevf_ring *tx_ring, int size)
{
	return likely(IXGBE_DESC_UNUSED(tx_ring) >= size) ?
		0 : __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
}
3114
3115static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3116{
3117 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3118 struct ixgbevf_ring *tx_ring;
3119 unsigned int first;
3120 unsigned int tx_flags = 0;
3121 u8 hdr_len = 0;
3122 int r_idx = 0, tso;
3123 int count = 0;
3124
3125 unsigned int f;
3126
3127 tx_ring = &adapter->tx_ring[r_idx];
3128
3129 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3130 tx_flags |= vlan_tx_tag_get(skb);
3131 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3132 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3133 }
3134
3135 /* four things can cause us to need a context descriptor */
3136 if (skb_is_gso(skb) ||
3137 (skb->ip_summed == CHECKSUM_PARTIAL) ||
3138 (tx_flags & IXGBE_TX_FLAGS_VLAN))
3139 count++;
3140
3141 count += TXD_USE_COUNT(skb_headlen(skb));
3142 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3143 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3144
3145 if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
3146 adapter->tx_busy++;
3147 return NETDEV_TX_BUSY;
3148 }
3149
3150 first = tx_ring->next_to_use;
3151
3152 if (skb->protocol == htons(ETH_P_IP))
3153 tx_flags |= IXGBE_TX_FLAGS_IPV4;
3154 tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
3155 if (tso < 0) {
3156 dev_kfree_skb_any(skb);
3157 return NETDEV_TX_OK;
3158 }
3159
3160 if (tso)
3161 tx_flags |= IXGBE_TX_FLAGS_TSO;
3162 else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
3163 (skb->ip_summed == CHECKSUM_PARTIAL))
3164 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3165
3166 ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
3167 ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
3168 skb->len, hdr_len);
3169
3170 netdev->trans_start = jiffies;
3171
3172 ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
3173
3174 return NETDEV_TX_OK;
3175}
3176
3177/**
3178 * ixgbevf_get_stats - Get System Network Statistics
3179 * @netdev: network interface device structure
3180 *
3181 * Returns the address of the device statistics structure.
3182 * The statistics are actually updated from the timer callback.
3183 **/
3184static struct net_device_stats *ixgbevf_get_stats(struct net_device *netdev)
3185{
3186 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3187
3188 /* only return the current stats */
3189 return &adapter->net_stats;
3190}
3191
3192/**
3193 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3194 * @netdev: network interface device structure
3195 * @p: pointer to an address structure
3196 *
3197 * Returns 0 on success, negative on failure
3198 **/
3199static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3200{
3201 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3202 struct ixgbe_hw *hw = &adapter->hw;
3203 struct sockaddr *addr = p;
3204
3205 if (!is_valid_ether_addr(addr->sa_data))
3206 return -EADDRNOTAVAIL;
3207
3208 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3209 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3210
3211 if (hw->mac.ops.set_rar)
3212 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3213
3214 return 0;
3215}
3216
3217/**
3218 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3219 * @netdev: network interface device structure
3220 * @new_mtu: new value for maximum frame size
3221 *
3222 * Returns 0 on success, negative on failure
3223 **/
3224static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3225{
3226 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3227 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3228
3229 /* MTU < 68 is an error and causes problems on some kernels */
3230 if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
3231 return -EINVAL;
3232
3233 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3234 netdev->mtu, new_mtu);
3235 /* must set new MTU before calling down or up */
3236 netdev->mtu = new_mtu;
3237
3238 if (netif_running(netdev))
3239 ixgbevf_reinit_locked(adapter);
3240
3241 return 0;
3242}
3243
/*
 * ixgbevf_shutdown - PCI shutdown hook: quiesce the device.
 * Detaches the netdev from the stack, tears down rings/IRQs if the
 * interface was running, saves PCI state when power management is built
 * in, and finally disables the PCI device.  Order matters here.
 */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* keep the stack from touching the device while we tear down */
	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
	}

#ifdef CONFIG_PM
	pci_save_state(pdev);
#endif

	pci_disable_device(pdev);
}
3264
3265static const struct net_device_ops ixgbe_netdev_ops = {
3266 .ndo_open = &ixgbevf_open,
3267 .ndo_stop = &ixgbevf_close,
3268 .ndo_start_xmit = &ixgbevf_xmit_frame,
3269 .ndo_get_stats = &ixgbevf_get_stats,
3270 .ndo_set_rx_mode = &ixgbevf_set_rx_mode,
3271 .ndo_set_multicast_list = &ixgbevf_set_rx_mode,
3272 .ndo_validate_addr = eth_validate_addr,
3273 .ndo_set_mac_address = &ixgbevf_set_mac,
3274 .ndo_change_mtu = &ixgbevf_change_mtu,
3275 .ndo_tx_timeout = &ixgbevf_tx_timeout,
3276 .ndo_vlan_rx_register = &ixgbevf_vlan_rx_register,
3277 .ndo_vlan_rx_add_vid = &ixgbevf_vlan_rx_add_vid,
3278 .ndo_vlan_rx_kill_vid = &ixgbevf_vlan_rx_kill_vid,
3279};
3280
3281static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3282{
3283 struct ixgbevf_adapter *adapter;
3284 adapter = netdev_priv(dev);
3285 dev->netdev_ops = &ixgbe_netdev_ops;
3286 ixgbevf_set_ethtool_ops(dev);
3287 dev->watchdog_timeo = 5 * HZ;
3288}
3289
3290/**
3291 * ixgbevf_probe - Device Initialization Routine
3292 * @pdev: PCI device information struct
3293 * @ent: entry in ixgbevf_pci_tbl
3294 *
3295 * Returns 0 on success, negative on failure
3296 *
3297 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3298 * The OS initialization, configuring of the adapter private structure,
3299 * and a hardware reset occur.
3300 **/
3301static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3302 const struct pci_device_id *ent)
3303{
3304 struct net_device *netdev;
3305 struct ixgbevf_adapter *adapter = NULL;
3306 struct ixgbe_hw *hw = NULL;
3307 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3308 static int cards_found;
3309 int err, pci_using_dac;
3310
3311 err = pci_enable_device(pdev);
3312 if (err)
3313 return err;
3314
3315 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
3316 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
3317 pci_using_dac = 1;
3318 } else {
3319 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3320 if (err) {
3321 err = pci_set_consistent_dma_mask(pdev,
3322 DMA_BIT_MASK(32));
3323 if (err) {
3324 dev_err(&pdev->dev, "No usable DMA "
3325 "configuration, aborting\n");
3326 goto err_dma;
3327 }
3328 }
3329 pci_using_dac = 0;
3330 }
3331
3332 err = pci_request_regions(pdev, ixgbevf_driver_name);
3333 if (err) {
3334 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3335 goto err_pci_reg;
3336 }
3337
3338 pci_set_master(pdev);
3339
3340#ifdef HAVE_TX_MQ
3341 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3342 MAX_TX_QUEUES);
3343#else
3344 netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
3345#endif
3346 if (!netdev) {
3347 err = -ENOMEM;
3348 goto err_alloc_etherdev;
3349 }
3350
3351 SET_NETDEV_DEV(netdev, &pdev->dev);
3352
3353 pci_set_drvdata(pdev, netdev);
3354 adapter = netdev_priv(netdev);
3355
3356 adapter->netdev = netdev;
3357 adapter->pdev = pdev;
3358 hw = &adapter->hw;
3359 hw->back = adapter;
3360 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
3361
3362 /*
3363 * call save state here in standalone driver because it relies on
3364 * adapter struct to exist, and needs to call netdev_priv
3365 */
3366 pci_save_state(pdev);
3367
3368 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3369 pci_resource_len(pdev, 0));
3370 if (!hw->hw_addr) {
3371 err = -EIO;
3372 goto err_ioremap;
3373 }
3374
3375 ixgbevf_assign_netdev_ops(netdev);
3376
3377 adapter->bd_number = cards_found;
3378
3379 /* Setup hw api */
3380 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3381 hw->mac.type = ii->mac;
3382
3383 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3384 sizeof(struct ixgbe_mac_operations));
3385
3386 adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
3387 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
3388 adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
3389
3390 /* setup the private structure */
3391 err = ixgbevf_sw_init(adapter);
3392
3393 ixgbevf_init_last_counter_stats(adapter);
3394
3395#ifdef MAX_SKB_FRAGS
3396 netdev->features = NETIF_F_SG |
3397 NETIF_F_IP_CSUM |
3398 NETIF_F_HW_VLAN_TX |
3399 NETIF_F_HW_VLAN_RX |
3400 NETIF_F_HW_VLAN_FILTER;
3401
3402 netdev->features |= NETIF_F_IPV6_CSUM;
3403 netdev->features |= NETIF_F_TSO;
3404 netdev->features |= NETIF_F_TSO6;
3405 netdev->vlan_features |= NETIF_F_TSO;
3406 netdev->vlan_features |= NETIF_F_TSO6;
3407 netdev->vlan_features |= NETIF_F_IP_CSUM;
3408 netdev->vlan_features |= NETIF_F_SG;
3409
3410 if (pci_using_dac)
3411 netdev->features |= NETIF_F_HIGHDMA;
3412
3413#endif /* MAX_SKB_FRAGS */
3414
3415 /* The HW MAC address was set and/or determined in sw_init */
3416 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
3417 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
3418
3419 if (!is_valid_ether_addr(netdev->dev_addr)) {
3420 printk(KERN_ERR "invalid MAC address\n");
3421 err = -EIO;
3422 goto err_sw_init;
3423 }
3424
3425 init_timer(&adapter->watchdog_timer);
3426 adapter->watchdog_timer.function = &ixgbevf_watchdog;
3427 adapter->watchdog_timer.data = (unsigned long)adapter;
3428
3429 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3430 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3431
3432 err = ixgbevf_init_interrupt_scheme(adapter);
3433 if (err)
3434 goto err_sw_init;
3435
3436 /* pick up the PCI bus settings for reporting later */
3437 if (hw->mac.ops.get_bus_info)
3438 hw->mac.ops.get_bus_info(hw);
3439
3440
3441 netif_carrier_off(netdev);
3442 netif_tx_stop_all_queues(netdev);
3443
3444 strcpy(netdev->name, "eth%d");
3445
3446 err = register_netdev(netdev);
3447 if (err)
3448 goto err_register;
3449
3450 adapter->netdev_registered = true;
3451
3452 /* print the MAC address */
3453 hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
3454 netdev->dev_addr[0],
3455 netdev->dev_addr[1],
3456 netdev->dev_addr[2],
3457 netdev->dev_addr[3],
3458 netdev->dev_addr[4],
3459 netdev->dev_addr[5]);
3460
3461 hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3462
3463 hw_dbg(hw, "LRO is disabled \n");
3464
3465 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3466 cards_found++;
3467 return 0;
3468
3469err_register:
3470err_sw_init:
3471 ixgbevf_reset_interrupt_capability(adapter);
3472 iounmap(hw->hw_addr);
3473err_ioremap:
3474 free_netdev(netdev);
3475err_alloc_etherdev:
3476 pci_release_regions(pdev);
3477err_pci_reg:
3478err_dma:
3479 pci_disable_device(pdev);
3480 return err;
3481}
3482
3483/**
3484 * ixgbevf_remove - Device Removal Routine
3485 * @pdev: PCI device information struct
3486 *
3487 * ixgbevf_remove is called by the PCI subsystem to alert the driver
3488 * that it should release a PCI device. This could be caused by a
3489 * Hot-Plug event, or because the driver is going to be removed from
3490 * memory.  Teardown order mirrors probe in reverse: stop deferred
3491 * work first, then unregister, then release hardware resources.
3491 **/
3492static void __devexit ixgbevf_remove(struct pci_dev *pdev)
3493{
3494	struct net_device *netdev = pci_get_drvdata(pdev);
3495	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3496
	/* mark the adapter down so timers/workers see we are going away */
3497	set_bit(__IXGBEVF_DOWN, &adapter->state);
3498
3499	del_timer_sync(&adapter->watchdog_timer);
3500
3501	cancel_work_sync(&adapter->watchdog_task);
3502
	/* flush any remaining scheduled work before tearing down */
3503	flush_scheduled_work();
3504
3505	if (adapter->netdev_registered) {
3506		unregister_netdev(netdev);
3507		adapter->netdev_registered = false;
3508	}
3509
	/* release MSI-X/MSI resources acquired in probe */
3510	ixgbevf_reset_interrupt_capability(adapter);
3511
3512	iounmap(adapter->hw.hw_addr);
3513	pci_release_regions(pdev);
3514
3515	hw_dbg(&adapter->hw, "Remove complete\n");
3516
	/* ring arrays were allocated by the interrupt-scheme setup */
3517	kfree(adapter->tx_ring);
3518	kfree(adapter->rx_ring);
3519
3520	free_netdev(netdev);
3521
3522	pci_disable_device(pdev);
3523}
3524
/* PCI driver glue binding probe/remove/shutdown to the ixgbevf device IDs */
3525static struct pci_driver ixgbevf_driver = {
3526	.name     = ixgbevf_driver_name,
3527	.id_table = ixgbevf_pci_tbl,
3528	.probe    = ixgbevf_probe,
3529	.remove   = __devexit_p(ixgbevf_remove),
3530	.shutdown = ixgbevf_shutdown,
3531};
3532
3533/**
3534 * ixgbe_init_module - Driver Registration Routine
3535 *
3536 * ixgbe_init_module is the first routine called when the driver is
3537 * loaded. All it does is register with the PCI subsystem.
3538 **/
3539static int __init ixgbevf_init_module(void)
3540{
3541 int ret;
3542 printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
3543 ixgbevf_driver_version);
3544
3545 printk(KERN_INFO "%s\n", ixgbevf_copyright);
3546
3547 ret = pci_register_driver(&ixgbevf_driver);
3548 return ret;
3549}
3550
3551module_init(ixgbevf_init_module);
3552
3553/**
3554 * ixgbevf_exit_module - Driver Exit Cleanup Routine
3555 *
3556 * ixgbevf_exit_module is called just before the driver is removed
3557 * from memory.  Unregistering the PCI driver triggers ixgbevf_remove
3558 * for every bound device.
3558 **/
3559static void __exit ixgbevf_exit_module(void)
3560{
3561	pci_unregister_driver(&ixgbevf_driver);
3562}
3563
#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * @hw: pointer to the HW structure
 *
 * Used by the hardware layer to print debugging information; maps the
 * hw struct back to its owning adapter and returns the netdev name.
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *owner = hw->back;

	return owner->netdev->name;
}

#endif
3576module_exit(ixgbevf_exit_module);
3577
3578/* ixgbevf_main.c */
diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c
new file mode 100644
index 000000000000..b8143501e6fc
--- /dev/null
+++ b/drivers/net/ixgbevf/mbx.c
@@ -0,0 +1,341 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include "mbx.h"
29
30/**
31 * ixgbevf_poll_for_msg - Wait for message notification
32 * @hw: pointer to the HW structure
33 *
34 * returns 0 if it successfully received a message notification
35 **/
36static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw)
37{
38 struct ixgbe_mbx_info *mbx = &hw->mbx;
39 int countdown = mbx->timeout;
40
41 while (countdown && mbx->ops.check_for_msg(hw)) {
42 countdown--;
43 udelay(mbx->udelay);
44 }
45
46 /* if we failed, all future posted messages fail until reset */
47 if (!countdown)
48 mbx->timeout = 0;
49
50 return countdown ? 0 : IXGBE_ERR_MBX;
51}
52
53/**
54 * ixgbevf_poll_for_ack - Wait for message acknowledgement
55 * @hw: pointer to the HW structure
56 *
57 * returns 0 if it successfully received a message acknowledgement
58 **/
59static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
60{
61 struct ixgbe_mbx_info *mbx = &hw->mbx;
62 int countdown = mbx->timeout;
63
64 while (countdown && mbx->ops.check_for_ack(hw)) {
65 countdown--;
66 udelay(mbx->udelay);
67 }
68
69 /* if we failed, all future posted messages fail until reset */
70 if (!countdown)
71 mbx->timeout = 0;
72
73 return countdown ? 0 : IXGBE_ERR_MBX;
74}
75
76/**
77 * ixgbevf_read_posted_mbx - Wait for message notification and receive message
78 * @hw: pointer to the HW structure
79 * @msg: The message buffer
80 * @size: Length of buffer
81 *
82 * returns 0 if it successfully received a message notification and
83 * copied it into the receive buffer.
84 **/
85static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
86{
87 struct ixgbe_mbx_info *mbx = &hw->mbx;
88 s32 ret_val = IXGBE_ERR_MBX;
89
90 ret_val = ixgbevf_poll_for_msg(hw);
91
92 /* if ack received read message, otherwise we timed out */
93 if (!ret_val)
94 ret_val = mbx->ops.read(hw, msg, size);
95
96 return ret_val;
97}
98
99/**
100 * ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack
101 * @hw: pointer to the HW structure
102 * @msg: The message buffer
103 * @size: Length of buffer
104 *
105 * returns 0 if it successfully copied message into the buffer and
106 * received an ack to that message within delay * timeout period
107 **/
108static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
109{
110 struct ixgbe_mbx_info *mbx = &hw->mbx;
111 s32 ret_val;
112
113 /* send msg */
114 ret_val = mbx->ops.write(hw, msg, size);
115
116 /* if msg sent wait until we receive an ack */
117 if (!ret_val)
118 ret_val = ixgbevf_poll_for_ack(hw);
119
120 return ret_val;
121}
122
123/**
124 * ixgbevf_read_v2p_mailbox - read v2p mailbox
125 * @hw: pointer to the HW structure
126 *
127 * This function is used to read the v2p mailbox without losing the read to
128 * clear status bits.
129 **/
130static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw)
131{
	/* hardware clears the R2C status bits as a side effect of this read */
132	u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
133
	/* merge in any status bits latched in software by earlier reads */
134	v2p_mailbox |= hw->mbx.v2p_mailbox;
	/* re-latch the read-to-clear bits so later callers still see them */
135	hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
136
137	return v2p_mailbox;
138}
139
140/**
141 * ixgbevf_check_for_bit_vf - Determine if a status bit was set
142 * @hw: pointer to the HW structure
143 * @mask: bitmask for bits to be tested and cleared
144 *
145 * This function is used to check for the read to clear bits within
146 * the V2P mailbox.
147 **/
148static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
149{
150 u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw);
151 s32 ret_val = IXGBE_ERR_MBX;
152
153 if (v2p_mailbox & mask)
154 ret_val = 0;
155
156 hw->mbx.v2p_mailbox &= ~mask;
157
158 return ret_val;
159}
160
161/**
162 * ixgbevf_check_for_msg_vf - checks to see if the PF has sent mail
163 * @hw: pointer to the HW structure
164 *
165 * returns 0 if the PF has set the Status bit or else ERR_MBX
166 **/
167static s32 ixgbevf_check_for_msg_vf(struct ixgbe_hw *hw)
168{
169 s32 ret_val = IXGBE_ERR_MBX;
170
171 if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
172 ret_val = 0;
173 hw->mbx.stats.reqs++;
174 }
175
176 return ret_val;
177}
178
179/**
180 * ixgbevf_check_for_ack_vf - checks to see if the PF has ACK'd
181 * @hw: pointer to the HW structure
182 *
183 * returns 0 if the PF has set the ACK bit or else ERR_MBX
184 **/
185static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw)
186{
187 s32 ret_val = IXGBE_ERR_MBX;
188
189 if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
190 ret_val = 0;
191 hw->mbx.stats.acks++;
192 }
193
194 return ret_val;
195}
196
197/**
198 * ixgbevf_check_for_rst_vf - checks to see if the PF has reset
199 * @hw: pointer to the HW structure
200 *
201 * returns true if the PF has set the reset done bit or else false
202 **/
203static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
204{
205 s32 ret_val = IXGBE_ERR_MBX;
206
207 if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
208 IXGBE_VFMAILBOX_RSTI))) {
209 ret_val = 0;
210 hw->mbx.stats.rsts++;
211 }
212
213 return ret_val;
214}
215
216/**
217 * ixgbevf_obtain_mbx_lock_vf - obtain mailbox lock
218 * @hw: pointer to the HW structure
219 *
220 * return 0 if we obtained the mailbox lock
221 **/
222static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
223{
224 s32 ret_val = IXGBE_ERR_MBX;
225
226 /* Take ownership of the buffer */
227 IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
228
229 /* reserve mailbox for vf use */
230 if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
231 ret_val = 0;
232
233 return ret_val;
234}
235
236/**
237 * ixgbevf_write_mbx_vf - Write a message to the mailbox
238 * @hw: pointer to the HW structure
239 * @msg: The message buffer
240 * @size: Length of buffer
241 *
242 * returns 0 if it successfully copied message into the buffer
243 **/
244static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
245{
246 s32 ret_val;
247 u16 i;
248
249
250 /* lock the mailbox to prevent pf/vf race condition */
251 ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
252 if (ret_val)
253 goto out_no_write;
254
255 /* flush msg and acks as we are overwriting the message buffer */
256 ixgbevf_check_for_msg_vf(hw);
257 ixgbevf_check_for_ack_vf(hw);
258
259 /* copy the caller specified message to the mailbox memory buffer */
260 for (i = 0; i < size; i++)
261 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
262
263 /* update stats */
264 hw->mbx.stats.msgs_tx++;
265
266 /* Drop VFU and interrupt the PF to tell it a message has been sent */
267 IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
268
269out_no_write:
270 return ret_val;
271}
272
273/**
274 * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf
275 * @hw: pointer to the HW structure
276 * @msg: The message buffer
277 * @size: Length of buffer
278 *
279 * returns 0 if it successfuly read message from buffer
280 **/
281static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
282{
283 s32 ret_val = 0;
284 u16 i;
285
286 /* lock the mailbox to prevent pf/vf race condition */
287 ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
288 if (ret_val)
289 goto out_no_read;
290
291 /* copy the message from the mailbox memory buffer */
292 for (i = 0; i < size; i++)
293 msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
294
295 /* Acknowledge receipt and release mailbox, then we're done */
296 IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
297
298 /* update stats */
299 hw->mbx.stats.msgs_rx++;
300
301out_no_read:
302 return ret_val;
303}
304
305/**
306 * ixgbevf_init_mbx_params_vf - set initial values for vf mailbox
307 * @hw: pointer to the HW structure
308 *
309 * Initializes the hw->mbx struct to correct values for vf mailbox
310 */
311s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
312{
313 struct ixgbe_mbx_info *mbx = &hw->mbx;
314
315 /* start mailbox as timed out and let the reset_hw call set the timeout
316 * value to begin communications */
317 mbx->timeout = 0;
318 mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;
319
320 mbx->size = IXGBE_VFMAILBOX_SIZE;
321
322 mbx->stats.msgs_tx = 0;
323 mbx->stats.msgs_rx = 0;
324 mbx->stats.reqs = 0;
325 mbx->stats.acks = 0;
326 mbx->stats.rsts = 0;
327
328 return 0;
329}
330
/* VF-side mailbox entry points; copied into hw->mbx.ops by the driver */
331struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
332	.init_params   = ixgbevf_init_mbx_params_vf,
333	.read          = ixgbevf_read_mbx_vf,
334	.write         = ixgbevf_write_mbx_vf,
335	.read_posted   = ixgbevf_read_posted_mbx,
336	.write_posted  = ixgbevf_write_posted_mbx,
337	.check_for_msg = ixgbevf_check_for_msg_vf,
338	.check_for_ack = ixgbevf_check_for_ack_vf,
339	.check_for_rst = ixgbevf_check_for_rst_vf,
340};
341
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h
new file mode 100644
index 000000000000..1b0e0bf4c0f5
--- /dev/null
+++ b/drivers/net/ixgbevf/mbx.h
@@ -0,0 +1,100 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _IXGBE_MBX_H_
29#define _IXGBE_MBX_H_
30
31#include "vf.h"
32
33#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
34#define IXGBE_ERR_MBX -100
35
36#define IXGBE_VFMAILBOX 0x002FC
37#define IXGBE_VFMBMEM 0x00200
38
39/* Define mailbox register bits */
40#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
41#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
42#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
43#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
44#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
45#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
46#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
47#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
48#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
49
50#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
51#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
52
53#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
54#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
55#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
56#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
57#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
58
59#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
60#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
61#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
62#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
63
64
65/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
66 * PF. The reverse is true if it is IXGBE_PF_*.
67 * Message ACK's are the value or'd with 0xF0000000
68 */
69#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
70 * this are the ACK */
71#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
72 * this are the NACK */
73#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
74 * clear to send requests */
75#define IXGBE_VT_MSGINFO_SHIFT 16
76/* bits 23:16 are used for extra info for certain messages */
77#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
78
79#define IXGBE_VF_RESET 0x01 /* VF requests reset */
80#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
81#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
82#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
83#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
84
85/* length of permanent address message returned from PF */
86#define IXGBE_VF_PERMADDR_MSG_LEN 4
87/* word in permanent address message with the current multicast type */
88#define IXGBE_VF_MC_TYPE_WORD 3
89
90#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
91
92#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
93#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
94
95/* forward declaration of the HW struct */
96struct ixgbe_hw;
97
98s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *);
99
100#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h
new file mode 100644
index 000000000000..12f75960aec1
--- /dev/null
+++ b/drivers/net/ixgbevf/regs.h
@@ -0,0 +1,85 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _IXGBEVF_REGS_H_
29#define _IXGBEVF_REGS_H_
30
31#define IXGBE_VFCTRL 0x00000
32#define IXGBE_VFSTATUS 0x00008
33#define IXGBE_VFLINKS 0x00010
34#define IXGBE_VFRTIMER 0x00048
35#define IXGBE_VFRXMEMWRAP 0x03190
36#define IXGBE_VTEICR 0x00100
37#define IXGBE_VTEICS 0x00104
38#define IXGBE_VTEIMS 0x00108
39#define IXGBE_VTEIMC 0x0010C
40#define IXGBE_VTEIAC 0x00110
41#define IXGBE_VTEIAM 0x00114
42#define IXGBE_VTEITR(x) (0x00820 + (4 * x))
43#define IXGBE_VTIVAR(x) (0x00120 + (4 * x))
44#define IXGBE_VTIVAR_MISC 0x00140
45#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x))
46#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x))
47#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x))
48#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x))
49#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x))
50#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x))
51#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x))
52#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x))
53#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x))
54#define IXGBE_VFPSRTYPE 0x00300
55#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x))
56#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x))
57#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x))
58#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x))
59#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x))
60#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x))
61#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x))
62#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x))
63#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x))
64#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x))
65#define IXGBE_VFGPRC 0x0101C
66#define IXGBE_VFGPTC 0x0201C
67#define IXGBE_VFGORC_LSB 0x01020
68#define IXGBE_VFGORC_MSB 0x01024
69#define IXGBE_VFGOTC_LSB 0x02020
70#define IXGBE_VFGOTC_MSB 0x02024
71#define IXGBE_VFMPRC 0x01034
72
73#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
74
75#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
76
77#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
78 writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
79
80#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
81 readl((a)->hw_addr + (reg) + ((offset) << 2)))
82
83#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
84
85#endif /* _IXGBEVF_REGS_H_ */
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
new file mode 100644
index 000000000000..4b5dec0ec140
--- /dev/null
+++ b/drivers/net/ixgbevf/vf.c
@@ -0,0 +1,387 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include "vf.h"
29
30/**
31 * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
32 * @hw: pointer to hardware structure
33 *
34 * Starts the hardware by filling the bus info structure and media type, clears
35 * all on chip counters, initializes receive address registers, multicast
36 * table, VLAN filter table, calls routine to set up link and flow control
37 * settings, and leaves transmit and receive units disabled and uninitialized
38 **/
39static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
40{
41 /* Clear adapter stopped flag */
42 hw->adapter_stopped = false;
43
44 return 0;
45}
46
47/**
48 * ixgbevf_init_hw_vf - virtual function hardware initialization
49 * @hw: pointer to hardware structure
50 *
51 * Initialize the hardware by resetting the hardware and then starting
52 * the hardware
53 **/
54static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
55{
56 s32 status = hw->mac.ops.start_hw(hw);
57
58 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
59
60 return status;
61}
62
63/**
64 * ixgbevf_reset_hw_vf - Performs hardware reset
65 * @hw: pointer to hardware structure
66 *
67 * Resets the hardware by reseting the transmit and receive units, masks and
68 * clears all interrupts.
69 **/
70static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
71{
72 struct ixgbe_mbx_info *mbx = &hw->mbx;
73 u32 timeout = IXGBE_VF_INIT_TIMEOUT;
74 s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
75 u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
76 u8 *addr = (u8 *)(&msgbuf[1]);
77
78 /* Call adapter stop to disable tx/rx and clear interrupts */
79 hw->mac.ops.stop_adapter(hw);
80
81 IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
82 IXGBE_WRITE_FLUSH(hw);
83
84 /* we cannot reset while the RSTI / RSTD bits are asserted */
85 while (!mbx->ops.check_for_rst(hw) && timeout) {
86 timeout--;
87 udelay(5);
88 }
89
90 if (!timeout)
91 return IXGBE_ERR_RESET_FAILED;
92
93 /* mailbox timeout can now become active */
94 mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
95
96 msgbuf[0] = IXGBE_VF_RESET;
97 mbx->ops.write_posted(hw, msgbuf, 1);
98
99 msleep(10);
100
101 /* set our "perm_addr" based on info provided by PF */
102 /* also set up the mc_filter_type which is piggy backed
103 * on the mac address in word 3 */
104 ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
105 if (ret_val)
106 return ret_val;
107
108 if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
109 return IXGBE_ERR_INVALID_MAC_ADDR;
110
111 memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
112 hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
113
114 return 0;
115}
116
117/**
118 * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
119 * @hw: pointer to hardware structure
120 *
121 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
122 * disables transmit and receive units. The adapter_stopped flag is used by
123 * the shared code and drivers to determine if the adapter is in a stopped
124 * state and should not touch the hardware.
125 **/
126static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
127{
128 u32 number_of_queues;
129 u32 reg_val;
130 u16 i;
131
132 /*
133 * Set the adapter_stopped flag so other driver functions stop touching
134 * the hardware
135 */
136 hw->adapter_stopped = true;
137
138 /* Disable the receive unit by stopped each queue */
139 number_of_queues = hw->mac.max_rx_queues;
140 for (i = 0; i < number_of_queues; i++) {
141 reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
142 if (reg_val & IXGBE_RXDCTL_ENABLE) {
143 reg_val &= ~IXGBE_RXDCTL_ENABLE;
144 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
145 }
146 }
147
148 IXGBE_WRITE_FLUSH(hw);
149
150 /* Clear interrupt mask to stop from interrupts being generated */
151 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
152
153 /* Clear any pending interrupts */
154 IXGBE_READ_REG(hw, IXGBE_VTEICR);
155
156 /* Disable the transmit unit. Each queue must be disabled. */
157 number_of_queues = hw->mac.max_tx_queues;
158 for (i = 0; i < number_of_queues; i++) {
159 reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
160 if (reg_val & IXGBE_TXDCTL_ENABLE) {
161 reg_val &= ~IXGBE_TXDCTL_ENABLE;
162 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
163 }
164 }
165
166 return 0;
167}
168
169/**
170 * ixgbevf_mta_vector - Determines bit-vector in multicast table to set
171 * @hw: pointer to hardware structure
172 * @mc_addr: the multicast address
173 *
174 * Extracts the 12 bits, from a multicast address, to determine which
175 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
176 * incoming rx multicast addresses, to determine the bit-vector to check in
177 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
178 * by the MO field of the MCSTCTRL. The MO field is set during initialization
179 * to mc_filter_type.
180 **/
181static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
182{
183 u32 vector = 0;
184
185 switch (hw->mac.mc_filter_type) {
186 case 0: /* use bits [47:36] of the address */
187 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
188 break;
189 case 1: /* use bits [46:35] of the address */
190 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
191 break;
192 case 2: /* use bits [45:34] of the address */
193 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
194 break;
195 case 3: /* use bits [43:32] of the address */
196 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
197 break;
198 default: /* Invalid mc_filter_type */
199 break;
200 }
201
202 /* vector can only be 12-bits or boundary will be exceeded */
203 vector &= 0xFFF;
204 return vector;
205}
206
207/**
208 * ixgbevf_get_mac_addr_vf - Read device MAC address
209 * @hw: pointer to the HW structure
210 * @mac_addr: pointer to storage for retrieved MAC address
211 **/
212static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
213{
214 memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
215
216 return 0;
217}
218
219/**
220 * ixgbevf_set_rar_vf - set device MAC address
221 * @hw: pointer to hardware structure
222 * @index: Receive address register to write
223 * @addr: Address to put into receive address register
224 * @vmdq: Unused in this implementation
225 **/
226static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
227 u32 vmdq)
228{
229 struct ixgbe_mbx_info *mbx = &hw->mbx;
230 u32 msgbuf[3];
231 u8 *msg_addr = (u8 *)(&msgbuf[1]);
232 s32 ret_val;
233
234 memset(msgbuf, 0, sizeof(msgbuf));
235 msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
236 memcpy(msg_addr, addr, 6);
237 ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
238
239 if (!ret_val)
240 ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
241
242 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
243
244 /* if nacked the address was rejected, use "perm_addr" */
245 if (!ret_val &&
246 (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
247 ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
248
249 return ret_val;
250}
251
252/**
253 * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
254 * @hw: pointer to the HW structure
255 * @mc_addr_list: array of multicast addresses to program
256 * @mc_addr_count: number of multicast addresses to program
257 * @next: caller supplied function to return next address in list
258 *
259 * Updates the Multicast Table Array.
260 **/
261static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
262 u32 mc_addr_count,
263 ixgbe_mc_addr_itr next)
264{
265 struct ixgbe_mbx_info *mbx = &hw->mbx;
266 u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
267 u16 *vector_list = (u16 *)&msgbuf[1];
268 u32 vector;
269 u32 cnt, i;
270 u32 vmdq;
271
272 /* Each entry in the list uses 1 16 bit word. We have 30
273 * 16 bit words available in our HW msg buffer (minus 1 for the
274 * msg type). That's 30 hash values if we pack 'em right. If
275 * there are more than 30 MC addresses to add then punt the
276 * extras for now and then add code to handle more than 30 later.
277 * It would be unusual for a server to request that many multi-cast
278 * addresses except for in large enterprise network environments.
279 */
280
281 cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
282 msgbuf[0] = IXGBE_VF_SET_MULTICAST;
283 msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
284
285 for (i = 0; i < cnt; i++) {
286 vector = ixgbevf_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
287 vector_list[i] = vector;
288 }
289
290 mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
291
292 return 0;
293}
294
295/**
296 * ixgbevf_set_vfta_vf - Set/Unset vlan filter table address
297 * @hw: pointer to the HW structure
298 * @vlan: 12 bit VLAN ID
299 * @vind: unused by VF drivers
300 * @vlan_on: if true then set bit, else clear bit
301 **/
302static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
303 bool vlan_on)
304{
305 struct ixgbe_mbx_info *mbx = &hw->mbx;
306 u32 msgbuf[2];
307
308 msgbuf[0] = IXGBE_VF_SET_VLAN;
309 msgbuf[1] = vlan;
310 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
311 msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
312
313 return mbx->ops.write_posted(hw, msgbuf, 2);
314}
315
316/**
317 * ixgbevf_setup_mac_link_vf - Setup MAC link settings
318 * @hw: pointer to hardware structure
319 * @speed: Unused in this implementation
320 * @autoneg: Unused in this implementation
321 * @autoneg_wait_to_complete: Unused in this implementation
322 *
323 * Do nothing and return success. VF drivers are not allowed to change
324 * global settings. Maintained for driver compatibility.
325 **/
326static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
327 ixgbe_link_speed speed, bool autoneg,
328 bool autoneg_wait_to_complete)
329{
330 return 0;
331}
332
333/**
334 * ixgbevf_check_mac_link_vf - Get link/speed status
335 * @hw: pointer to hardware structure
336 * @speed: pointer to link speed
337 * @link_up: true is link is up, false otherwise
338 * @autoneg_wait_to_complete: true when waiting for completion is needed
339 *
340 * Reads the links register to determine if link is up and the current speed
341 **/
342static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
343 ixgbe_link_speed *speed,
344 bool *link_up,
345 bool autoneg_wait_to_complete)
346{
347 u32 links_reg;
348
349 if (!(hw->mbx.ops.check_for_rst(hw))) {
350 *link_up = false;
351 *speed = 0;
352 return -1;
353 }
354
355 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
356
357 if (links_reg & IXGBE_LINKS_UP)
358 *link_up = true;
359 else
360 *link_up = false;
361
362 if (links_reg & IXGBE_LINKS_SPEED)
363 *speed = IXGBE_LINK_SPEED_10GB_FULL;
364 else
365 *speed = IXGBE_LINK_SPEED_1GB_FULL;
366
367 return 0;
368}
369
370struct ixgbe_mac_operations ixgbevf_mac_ops = {
371 .init_hw = ixgbevf_init_hw_vf,
372 .reset_hw = ixgbevf_reset_hw_vf,
373 .start_hw = ixgbevf_start_hw_vf,
374 .get_mac_addr = ixgbevf_get_mac_addr_vf,
375 .stop_adapter = ixgbevf_stop_hw_vf,
376 .setup_link = ixgbevf_setup_mac_link_vf,
377 .check_link = ixgbevf_check_mac_link_vf,
378 .set_rar = ixgbevf_set_rar_vf,
379 .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
380 .set_vfta = ixgbevf_set_vfta_vf,
381};
382
383struct ixgbevf_info ixgbevf_vf_info = {
384 .mac = ixgbe_mac_82599_vf,
385 .mac_ops = &ixgbevf_mac_ops,
386};
387
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
new file mode 100644
index 000000000000..799600e92700
--- /dev/null
+++ b/drivers/net/ixgbevf/vf.h
@@ -0,0 +1,168 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef __IXGBE_VF_H__
29#define __IXGBE_VF_H__
30
31#include <linux/pci.h>
32#include <linux/delay.h>
33#include <linux/interrupt.h>
34#include <linux/if_ether.h>
35
36#include "defines.h"
37#include "regs.h"
38#include "mbx.h"
39
/* forward declaration; only pointers to the HW struct are needed here */
struct ixgbe_hw;

/* iterator type for walking multicast address lists: returns the next
 * address and advances *mc_addr_ptr; *vmdq is an out-parameter —
 * presumably a VMDq pool index, confirm against callers
 */
typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
				  u32 *vmdq);
/* Function table abstracting MAC operations; the VF implementations
 * live in ixgbevf_mac_ops.  Slots left unset there remain NULL.
 */
struct ixgbe_mac_operations {
	s32 (*init_hw)(struct ixgbe_hw *);
	s32 (*reset_hw)(struct ixgbe_hw *);
	s32 (*start_hw)(struct ixgbe_hw *);
	s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
	enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
	u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
	s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
	s32 (*stop_adapter)(struct ixgbe_hw *);
	s32 (*get_bus_info)(struct ixgbe_hw *);

	/* Link */
	s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
	s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
	s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
				     bool *);

	/* RAR, Multicast, VLAN */
	s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32);
	s32 (*init_rx_addrs)(struct ixgbe_hw *);
	s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
				   ixgbe_mc_addr_itr);
	s32 (*enable_mc)(struct ixgbe_hw *);
	s32 (*disable_mc)(struct ixgbe_hw *);
	s32 (*clear_vfta)(struct ixgbe_hw *);
	s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
};
72
/* MAC silicon variants handled by this VF driver */
enum ixgbe_mac_type {
	ixgbe_mac_unknown = 0,
	ixgbe_mac_82599_vf,
	ixgbe_num_macs		/* keep last: count of known MAC types */
};
78
/* Per-MAC state: operation table, addresses and filter configuration */
struct ixgbe_mac_info {
	struct ixgbe_mac_operations ops;
	u8 addr[6];		/* currently active MAC address */
	u8 perm_addr[6];	/* permanent address, source for get_mac_addr */

	enum ixgbe_mac_type type;

	/* MO field value programmed into MCSTCTRL; selects which 12
	 * address bits form the multicast hash (see ixgbevf_mta_vector)
	 */
	s32 mc_filter_type;

	bool get_link_status;
	u32 max_tx_queues;
	u32 max_rx_queues;
	u32 max_msix_vectors;
};
93
/* Mailbox primitives used for VF<->PF communication.  The read/write
 * variants take (hw, buffer, word count); "posted" variants are used by
 * vf.c for request/response exchanges.
 */
struct ixgbe_mbx_operations {
	s32 (*init_params)(struct ixgbe_hw *hw);
	s32 (*read)(struct ixgbe_hw *, u32 *, u16);
	s32 (*write)(struct ixgbe_hw *, u32 *, u16);
	s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16);
	s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16);
	s32 (*check_for_msg)(struct ixgbe_hw *);
	s32 (*check_for_ack)(struct ixgbe_hw *);
	s32 (*check_for_rst)(struct ixgbe_hw *);
};
104
/* Mailbox traffic counters (messages, acks, requests, resets) */
struct ixgbe_mbx_stats {
	u32 msgs_tx;
	u32 msgs_rx;

	u32 acks;
	u32 reqs;
	u32 rsts;
};
113
/* Mailbox state: operation table, counters and polling parameters */
struct ixgbe_mbx_info {
	struct ixgbe_mbx_operations ops;
	struct ixgbe_mbx_stats stats;
	u32 timeout;		/* polling timeout — units set by mbx code, not visible here */
	u32 udelay;		/* delay between polls — presumably microseconds, confirm in mbx.c */
	u32 v2p_mailbox;
	u16 size;		/* mailbox buffer size in 32-bit words — TODO confirm */
};
122
/* Top-level hardware handle shared by the vf.c and mailbox code */
struct ixgbe_hw {
	void *back;		/* opaque pointer back to the owning adapter struct */

	u8 __iomem *hw_addr;	/* mapped register base — presumably used by IXGBE_READ_REG */
	u8 *flash_address;
	unsigned long io_base;

	struct ixgbe_mac_info mac;
	struct ixgbe_mbx_info mbx;

	/* PCI identity — presumably filled at probe time, confirm in driver core */
	u16 device_id;
	u16 subsystem_vendor_id;
	u16 subsystem_device_id;
	u16 vendor_id;

	u8 revision_id;
	bool adapter_stopped;
};
141
/* VF statistics counters.  The base/last/current triples per counter
 * suggest snapshot-based accounting (base at reset, last at previous
 * read) — confirm against the driver's stats update path, not visible
 * here.
 */
struct ixgbevf_hw_stats {
	u64 base_vfgprc;
	u64 base_vfgptc;
	u64 base_vfgorc;
	u64 base_vfgotc;
	u64 base_vfmprc;

	u64 last_vfgprc;
	u64 last_vfgptc;
	u64 last_vfgorc;
	u64 last_vfgotc;
	u64 last_vfmprc;

	u64 vfgprc;
	u64 vfgptc;
	u64 vfgorc;
	u64 vfgotc;
	u64 vfmprc;
};
161
/* Board info: pairs a MAC type with its operation table */
struct ixgbevf_info {
	enum ixgbe_mac_type mac;
	struct ixgbe_mac_operations *mac_ops;
};
166
167#endif /* __IXGBE_VF_H__ */
168
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 792b88fc3574..0f31497833df 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -288,7 +288,7 @@ jme_set_rx_pcc(struct jme_adapter *jme, int p)
288 wmb(); 288 wmb();
289 289
290 if (!(test_bit(JME_FLAG_POLL, &jme->flags))) 290 if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
291 msg_rx_status(jme, "Switched to PCC_P%d\n", p); 291 netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p);
292} 292}
293 293
294static void 294static void
@@ -483,13 +483,13 @@ jme_check_link(struct net_device *netdev, int testonly)
483 strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ? 483 strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
484 "MDI-X" : 484 "MDI-X" :
485 "MDI"); 485 "MDI");
486 msg_link(jme, "Link is up at %s.\n", linkmsg); 486 netif_info(jme, link, jme->dev, "Link is up at %s.\n", linkmsg);
487 netif_carrier_on(netdev); 487 netif_carrier_on(netdev);
488 } else { 488 } else {
489 if (testonly) 489 if (testonly)
490 goto out; 490 goto out;
491 491
492 msg_link(jme, "Link is down.\n"); 492 netif_info(jme, link, jme->dev, "Link is down.\n");
493 jme->phylink = 0; 493 jme->phylink = 0;
494 netif_carrier_off(netdev); 494 netif_carrier_off(netdev);
495 } 495 }
@@ -883,20 +883,20 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
883 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS)) 883 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
884 == RXWBFLAG_TCPON)) { 884 == RXWBFLAG_TCPON)) {
885 if (flags & RXWBFLAG_IPV4) 885 if (flags & RXWBFLAG_IPV4)
886 msg_rx_err(jme, "TCP Checksum error\n"); 886 netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n");
887 return false; 887 return false;
888 } 888 }
889 889
890 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS)) 890 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
891 == RXWBFLAG_UDPON)) { 891 == RXWBFLAG_UDPON)) {
892 if (flags & RXWBFLAG_IPV4) 892 if (flags & RXWBFLAG_IPV4)
893 msg_rx_err(jme, "UDP Checksum error.\n"); 893 netif_err(jme, rx_err, jme->dev, "UDP Checksum error.\n");
894 return false; 894 return false;
895 } 895 }
896 896
897 if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS)) 897 if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
898 == RXWBFLAG_IPV4)) { 898 == RXWBFLAG_IPV4)) {
899 msg_rx_err(jme, "IPv4 Checksum error.\n"); 899 netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error.\n");
900 return false; 900 return false;
901 } 901 }
902 902
@@ -1186,9 +1186,9 @@ jme_link_change_tasklet(unsigned long arg)
1186 1186
1187 while (!atomic_dec_and_test(&jme->link_changing)) { 1187 while (!atomic_dec_and_test(&jme->link_changing)) {
1188 atomic_inc(&jme->link_changing); 1188 atomic_inc(&jme->link_changing);
1189 msg_intr(jme, "Get link change lock failed.\n"); 1189 netif_info(jme, intr, jme->dev, "Get link change lock failed.\n");
1190 while (atomic_read(&jme->link_changing) != 1) 1190 while (atomic_read(&jme->link_changing) != 1)
1191 msg_intr(jme, "Waiting link change lock.\n"); 1191 netif_info(jme, intr, jme->dev, "Waiting link change lock.\n");
1192 } 1192 }
1193 1193
1194 if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu) 1194 if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
@@ -1305,7 +1305,7 @@ jme_rx_empty_tasklet(unsigned long arg)
1305 if (unlikely(!netif_carrier_ok(jme->dev))) 1305 if (unlikely(!netif_carrier_ok(jme->dev)))
1306 return; 1306 return;
1307 1307
1308 msg_rx_status(jme, "RX Queue Full!\n"); 1308 netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");
1309 1309
1310 jme_rx_clean_tasklet(arg); 1310 jme_rx_clean_tasklet(arg);
1311 1311
@@ -1325,7 +1325,7 @@ jme_wake_queue_if_stopped(struct jme_adapter *jme)
1325 smp_wmb(); 1325 smp_wmb();
1326 if (unlikely(netif_queue_stopped(jme->dev) && 1326 if (unlikely(netif_queue_stopped(jme->dev) &&
1327 atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) { 1327 atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
1328 msg_tx_done(jme, "TX Queue Waked.\n"); 1328 netif_info(jme, tx_done, jme->dev, "TX Queue Waked.\n");
1329 netif_wake_queue(jme->dev); 1329 netif_wake_queue(jme->dev);
1330 } 1330 }
1331 1331
@@ -1835,7 +1835,7 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
1835 *flags |= TXFLAG_UDPCS; 1835 *flags |= TXFLAG_UDPCS;
1836 break; 1836 break;
1837 default: 1837 default:
1838 msg_tx_err(jme, "Error upper layer protocol.\n"); 1838 netif_err(jme, tx_err, jme->dev, "Error upper layer protocol.\n");
1839 break; 1839 break;
1840 } 1840 }
1841 } 1841 }
@@ -1910,12 +1910,12 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
1910 smp_wmb(); 1910 smp_wmb();
1911 if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) { 1911 if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
1912 netif_stop_queue(jme->dev); 1912 netif_stop_queue(jme->dev);
1913 msg_tx_queued(jme, "TX Queue Paused.\n"); 1913 netif_info(jme, tx_queued, jme->dev, "TX Queue Paused.\n");
1914 smp_wmb(); 1914 smp_wmb();
1915 if (atomic_read(&txring->nr_free) 1915 if (atomic_read(&txring->nr_free)
1916 >= (jme->tx_wake_threshold)) { 1916 >= (jme->tx_wake_threshold)) {
1917 netif_wake_queue(jme->dev); 1917 netif_wake_queue(jme->dev);
1918 msg_tx_queued(jme, "TX Queue Fast Waked.\n"); 1918 netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked.\n");
1919 } 1919 }
1920 } 1920 }
1921 1921
@@ -1923,7 +1923,7 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
1923 (jiffies - txbi->start_xmit) >= TX_TIMEOUT && 1923 (jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
1924 txbi->skb)) { 1924 txbi->skb)) {
1925 netif_stop_queue(jme->dev); 1925 netif_stop_queue(jme->dev);
1926 msg_tx_queued(jme, "TX Queue Stopped %d@%lu.\n", idx, jiffies); 1926 netif_info(jme, tx_queued, jme->dev, "TX Queue Stopped %d@%lu.\n", idx, jiffies);
1927 } 1927 }
1928} 1928}
1929 1929
@@ -1946,7 +1946,7 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1946 1946
1947 if (unlikely(idx < 0)) { 1947 if (unlikely(idx < 0)) {
1948 netif_stop_queue(netdev); 1948 netif_stop_queue(netdev);
1949 msg_tx_err(jme, "BUG! Tx ring full when queue awake!\n"); 1949 netif_err(jme, tx_err, jme->dev, "BUG! Tx ring full when queue awake!\n");
1950 1950
1951 return NETDEV_TX_BUSY; 1951 return NETDEV_TX_BUSY;
1952 } 1952 }
@@ -1997,7 +1997,6 @@ jme_set_multi(struct net_device *netdev)
1997{ 1997{
1998 struct jme_adapter *jme = netdev_priv(netdev); 1998 struct jme_adapter *jme = netdev_priv(netdev);
1999 u32 mc_hash[2] = {}; 1999 u32 mc_hash[2] = {};
2000 int i;
2001 2000
2002 spin_lock_bh(&jme->rxmcs_lock); 2001 spin_lock_bh(&jme->rxmcs_lock);
2003 2002
@@ -2012,10 +2011,7 @@ jme_set_multi(struct net_device *netdev)
2012 int bit_nr; 2011 int bit_nr;
2013 2012
2014 jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED; 2013 jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
2015 for (i = 0, mclist = netdev->mc_list; 2014 netdev_for_each_mc_addr(mclist, netdev) {
2016 mclist && i < netdev->mc_count;
2017 ++i, mclist = mclist->next) {
2018
2019 bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F; 2015 bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
2020 mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F); 2016 mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
2021 } 2017 }
@@ -2473,7 +2469,7 @@ jme_smb_read(struct jme_adapter *jme, unsigned int addr)
2473 val = jread32(jme, JME_SMBCSR); 2469 val = jread32(jme, JME_SMBCSR);
2474 } 2470 }
2475 if (!to) { 2471 if (!to) {
2476 msg_hw(jme, "SMB Bus Busy.\n"); 2472 netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
2477 return 0xFF; 2473 return 0xFF;
2478 } 2474 }
2479 2475
@@ -2489,7 +2485,7 @@ jme_smb_read(struct jme_adapter *jme, unsigned int addr)
2489 val = jread32(jme, JME_SMBINTF); 2485 val = jread32(jme, JME_SMBINTF);
2490 } 2486 }
2491 if (!to) { 2487 if (!to) {
2492 msg_hw(jme, "SMB Bus Busy.\n"); 2488 netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
2493 return 0xFF; 2489 return 0xFF;
2494 } 2490 }
2495 2491
@@ -2509,7 +2505,7 @@ jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
2509 val = jread32(jme, JME_SMBCSR); 2505 val = jread32(jme, JME_SMBCSR);
2510 } 2506 }
2511 if (!to) { 2507 if (!to) {
2512 msg_hw(jme, "SMB Bus Busy.\n"); 2508 netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
2513 return; 2509 return;
2514 } 2510 }
2515 2511
@@ -2526,7 +2522,7 @@ jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
2526 val = jread32(jme, JME_SMBINTF); 2522 val = jread32(jme, JME_SMBINTF);
2527 } 2523 }
2528 if (!to) { 2524 if (!to) {
2529 msg_hw(jme, "SMB Bus Busy.\n"); 2525 netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
2530 return; 2526 return;
2531 } 2527 }
2532 2528
@@ -2876,14 +2872,14 @@ jme_init_one(struct pci_dev *pdev,
2876 goto err_out_unmap; 2872 goto err_out_unmap;
2877 } 2873 }
2878 2874
2879 msg_probe(jme, "%s%s ver:%x rev:%x macaddr:%pM\n", 2875 netif_info(jme, probe, jme->dev, "%s%s ver:%x rev:%x macaddr:%pM\n",
2880 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ? 2876 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
2881 "JMC250 Gigabit Ethernet" : 2877 "JMC250 Gigabit Ethernet" :
2882 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ? 2878 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
2883 "JMC260 Fast Ethernet" : "Unknown", 2879 "JMC260 Fast Ethernet" : "Unknown",
2884 (jme->fpgaver != 0) ? " (FPGA)" : "", 2880 (jme->fpgaver != 0) ? " (FPGA)" : "",
2885 (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev, 2881 (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
2886 jme->rev, netdev->dev_addr); 2882 jme->rev, netdev->dev_addr);
2887 2883
2888 return 0; 2884 return 0;
2889 2885
@@ -2994,7 +2990,7 @@ jme_resume(struct pci_dev *pdev)
2994} 2990}
2995#endif 2991#endif
2996 2992
2997static struct pci_device_id jme_pci_tbl[] = { 2993static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = {
2998 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) }, 2994 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
2999 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) }, 2995 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
3000 { } 2996 { }
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index 251abed3817e..c19db9146a2f 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -45,43 +45,16 @@
45 printk(KERN_ERR PFX fmt, ## args) 45 printk(KERN_ERR PFX fmt, ## args)
46 46
47#ifdef TX_DEBUG 47#ifdef TX_DEBUG
48#define tx_dbg(priv, fmt, args...) \ 48#define tx_dbg(priv, fmt, args...) \
49 printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ## args) 49 printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ##args)
50#else 50#else
51#define tx_dbg(priv, fmt, args...) 51#define tx_dbg(priv, fmt, args...) \
52do { \
53 if (0) \
54 printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ##args); \
55} while (0)
52#endif 56#endif
53 57
54#define jme_msg(msglvl, type, priv, fmt, args...) \
55 if (netif_msg_##type(priv)) \
56 printk(msglvl "%s: " fmt, (priv)->dev->name, ## args)
57
58#define msg_probe(priv, fmt, args...) \
59 jme_msg(KERN_INFO, probe, priv, fmt, ## args)
60
61#define msg_link(priv, fmt, args...) \
62 jme_msg(KERN_INFO, link, priv, fmt, ## args)
63
64#define msg_intr(priv, fmt, args...) \
65 jme_msg(KERN_INFO, intr, priv, fmt, ## args)
66
67#define msg_rx_err(priv, fmt, args...) \
68 jme_msg(KERN_ERR, rx_err, priv, fmt, ## args)
69
70#define msg_rx_status(priv, fmt, args...) \
71 jme_msg(KERN_INFO, rx_status, priv, fmt, ## args)
72
73#define msg_tx_err(priv, fmt, args...) \
74 jme_msg(KERN_ERR, tx_err, priv, fmt, ## args)
75
76#define msg_tx_done(priv, fmt, args...) \
77 jme_msg(KERN_INFO, tx_done, priv, fmt, ## args)
78
79#define msg_tx_queued(priv, fmt, args...) \
80 jme_msg(KERN_INFO, tx_queued, priv, fmt, ## args)
81
82#define msg_hw(priv, fmt, args...) \
83 jme_msg(KERN_ERR, hw, priv, fmt, ## args)
84
85/* 58/*
86 * Extra PCI Configuration space interface 59 * Extra PCI Configuration space interface
87 */ 60 */
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 25e2af6997e4..300c2249812d 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -482,7 +482,7 @@ static void korina_multicast_list(struct net_device *dev)
482{ 482{
483 struct korina_private *lp = netdev_priv(dev); 483 struct korina_private *lp = netdev_priv(dev);
484 unsigned long flags; 484 unsigned long flags;
485 struct dev_mc_list *dmi = dev->mc_list; 485 struct dev_mc_list *dmi;
486 u32 recognise = ETH_ARC_AB; /* always accept broadcasts */ 486 u32 recognise = ETH_ARC_AB; /* always accept broadcasts */
487 int i; 487 int i;
488 488
@@ -490,23 +490,21 @@ static void korina_multicast_list(struct net_device *dev)
490 if (dev->flags & IFF_PROMISC) 490 if (dev->flags & IFF_PROMISC)
491 recognise |= ETH_ARC_PRO; 491 recognise |= ETH_ARC_PRO;
492 492
493 else if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 4)) 493 else if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 4))
494 /* All multicast and broadcast */ 494 /* All multicast and broadcast */
495 recognise |= ETH_ARC_AM; 495 recognise |= ETH_ARC_AM;
496 496
497 /* Build the hash table */ 497 /* Build the hash table */
498 if (dev->mc_count > 4) { 498 if (netdev_mc_count(dev) > 4) {
499 u16 hash_table[4]; 499 u16 hash_table[4];
500 u32 crc; 500 u32 crc;
501 501
502 for (i = 0; i < 4; i++) 502 for (i = 0; i < 4; i++)
503 hash_table[i] = 0; 503 hash_table[i] = 0;
504 504
505 for (i = 0; i < dev->mc_count; i++) { 505 netdev_for_each_mc_addr(dmi, dev) {
506 char *addrs = dmi->dmi_addr; 506 char *addrs = dmi->dmi_addr;
507 507
508 dmi = dmi->next;
509
510 if (!(*addrs & 1)) 508 if (!(*addrs & 1))
511 continue; 509 continue;
512 510
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 6d3ac65bc35c..b5219cce12ed 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -965,14 +965,13 @@ static void ks8851_set_rx_mode(struct net_device *dev)
965 965
966 rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE | 966 rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE |
967 RXCR1_RXPAFMA | RXCR1_RXMAFMA); 967 RXCR1_RXPAFMA | RXCR1_RXMAFMA);
968 } else if (dev->flags & IFF_MULTICAST && dev->mc_count > 0) { 968 } else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) {
969 struct dev_mc_list *mcptr = dev->mc_list; 969 struct dev_mc_list *mcptr;
970 u32 crc; 970 u32 crc;
971 int i;
972 971
973 /* accept some multicast */ 972 /* accept some multicast */
974 973
975 for (i = dev->mc_count; i > 0; i--) { 974 netdev_for_each_mc_addr(mcptr, dev) {
976 crc = ether_crc(ETH_ALEN, mcptr->dmi_addr); 975 crc = ether_crc(ETH_ALEN, mcptr->dmi_addr);
977 crc >>= (32 - 6); /* get top six bits */ 976 crc >>= (32 - 6); /* get top six bits */
978 977
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
index c0ceebccaa49..84b0e15831f9 100644
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ks8851_mll.c
@@ -1193,10 +1193,11 @@ static void ks_set_rx_mode(struct net_device *netdev)
1193 else 1193 else
1194 ks_set_promis(ks, false); 1194 ks_set_promis(ks, false);
1195 1195
1196 if ((netdev->flags & IFF_MULTICAST) && netdev->mc_count) { 1196 if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
1197 if (netdev->mc_count <= MAX_MCAST_LST) { 1197 if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
1198 int i = 0; 1198 int i = 0;
1199 for (ptr = netdev->mc_list; ptr; ptr = ptr->next) { 1199
1200 netdev_for_each_mc_addr(ptr, netdev) {
1200 if (!(*ptr->dmi_addr & 1)) 1201 if (!(*ptr->dmi_addr & 1))
1201 continue; 1202 continue;
1202 if (i >= MAX_MCAST_LST) 1203 if (i >= MAX_MCAST_LST)
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
new file mode 100644
index 000000000000..7264a3e5c2c0
--- /dev/null
+++ b/drivers/net/ksz884x.c
@@ -0,0 +1,7335 @@
1/**
 2 * drivers/net/ksz884x.c - Micrel KSZ8841/2 PCI Ethernet driver
3 *
4 * Copyright (c) 2009-2010 Micrel, Inc.
5 * Tristram Ha <Tristram.Ha@micrel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/init.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/version.h>
21#include <linux/ioport.h>
22#include <linux/pci.h>
23#include <linux/proc_fs.h>
24#include <linux/mii.h>
25#include <linux/platform_device.h>
26#include <linux/ethtool.h>
27#include <linux/etherdevice.h>
28#include <linux/in.h>
29#include <linux/ip.h>
30#include <linux/if_vlan.h>
31#include <linux/crc32.h>
32#include <linux/sched.h>
33
34
35/* DMA Registers */
36
37#define KS_DMA_TX_CTRL 0x0000
38#define DMA_TX_ENABLE 0x00000001
39#define DMA_TX_CRC_ENABLE 0x00000002
40#define DMA_TX_PAD_ENABLE 0x00000004
41#define DMA_TX_LOOPBACK 0x00000100
42#define DMA_TX_FLOW_ENABLE 0x00000200
43#define DMA_TX_CSUM_IP 0x00010000
44#define DMA_TX_CSUM_TCP 0x00020000
45#define DMA_TX_CSUM_UDP 0x00040000
46#define DMA_TX_BURST_SIZE 0x3F000000
47
48#define KS_DMA_RX_CTRL 0x0004
49#define DMA_RX_ENABLE 0x00000001
50#define KS884X_DMA_RX_MULTICAST 0x00000002
51#define DMA_RX_PROMISCUOUS 0x00000004
52#define DMA_RX_ERROR 0x00000008
53#define DMA_RX_UNICAST 0x00000010
54#define DMA_RX_ALL_MULTICAST 0x00000020
55#define DMA_RX_BROADCAST 0x00000040
56#define DMA_RX_FLOW_ENABLE 0x00000200
57#define DMA_RX_CSUM_IP 0x00010000
58#define DMA_RX_CSUM_TCP 0x00020000
59#define DMA_RX_CSUM_UDP 0x00040000
60#define DMA_RX_BURST_SIZE 0x3F000000
61
62#define DMA_BURST_SHIFT 24
63#define DMA_BURST_DEFAULT 8
64
65#define KS_DMA_TX_START 0x0008
66#define KS_DMA_RX_START 0x000C
67#define DMA_START 0x00000001
68
69#define KS_DMA_TX_ADDR 0x0010
70#define KS_DMA_RX_ADDR 0x0014
71
72#define DMA_ADDR_LIST_MASK 0xFFFFFFFC
73#define DMA_ADDR_LIST_SHIFT 2
74
75/* MTR0 */
76#define KS884X_MULTICAST_0_OFFSET 0x0020
77#define KS884X_MULTICAST_1_OFFSET 0x0021
78#define KS884X_MULTICAST_2_OFFSET 0x0022
79#define KS884x_MULTICAST_3_OFFSET 0x0023
80/* MTR1 */
81#define KS884X_MULTICAST_4_OFFSET 0x0024
82#define KS884X_MULTICAST_5_OFFSET 0x0025
83#define KS884X_MULTICAST_6_OFFSET 0x0026
84#define KS884X_MULTICAST_7_OFFSET 0x0027
85
86/* Interrupt Registers */
87
88/* INTEN */
89#define KS884X_INTERRUPTS_ENABLE 0x0028
90/* INTST */
91#define KS884X_INTERRUPTS_STATUS 0x002C
92
93#define KS884X_INT_RX_STOPPED 0x02000000
94#define KS884X_INT_TX_STOPPED 0x04000000
95#define KS884X_INT_RX_OVERRUN 0x08000000
96#define KS884X_INT_TX_EMPTY 0x10000000
97#define KS884X_INT_RX 0x20000000
98#define KS884X_INT_TX 0x40000000
99#define KS884X_INT_PHY 0x80000000
100
101#define KS884X_INT_RX_MASK \
102 (KS884X_INT_RX | KS884X_INT_RX_OVERRUN)
103#define KS884X_INT_TX_MASK \
104 (KS884X_INT_TX | KS884X_INT_TX_EMPTY)
105#define KS884X_INT_MASK (KS884X_INT_RX | KS884X_INT_TX | KS884X_INT_PHY)
106
107/* MAC Additional Station Address */
108
109/* MAAL0 */
110#define KS_ADD_ADDR_0_LO 0x0080
111/* MAAH0 */
112#define KS_ADD_ADDR_0_HI 0x0084
113/* MAAL1 */
114#define KS_ADD_ADDR_1_LO 0x0088
115/* MAAH1 */
116#define KS_ADD_ADDR_1_HI 0x008C
117/* MAAL2 */
118#define KS_ADD_ADDR_2_LO 0x0090
119/* MAAH2 */
120#define KS_ADD_ADDR_2_HI 0x0094
121/* MAAL3 */
122#define KS_ADD_ADDR_3_LO 0x0098
123/* MAAH3 */
124#define KS_ADD_ADDR_3_HI 0x009C
125/* MAAL4 */
126#define KS_ADD_ADDR_4_LO 0x00A0
127/* MAAH4 */
128#define KS_ADD_ADDR_4_HI 0x00A4
129/* MAAL5 */
130#define KS_ADD_ADDR_5_LO 0x00A8
131/* MAAH5 */
132#define KS_ADD_ADDR_5_HI 0x00AC
133/* MAAL6 */
134#define KS_ADD_ADDR_6_LO 0x00B0
135/* MAAH6 */
136#define KS_ADD_ADDR_6_HI 0x00B4
137/* MAAL7 */
138#define KS_ADD_ADDR_7_LO 0x00B8
139/* MAAH7 */
140#define KS_ADD_ADDR_7_HI 0x00BC
141/* MAAL8 */
142#define KS_ADD_ADDR_8_LO 0x00C0
143/* MAAH8 */
144#define KS_ADD_ADDR_8_HI 0x00C4
145/* MAAL9 */
146#define KS_ADD_ADDR_9_LO 0x00C8
147/* MAAH9 */
148#define KS_ADD_ADDR_9_HI 0x00CC
149/* MAAL10 */
150#define KS_ADD_ADDR_A_LO 0x00D0
151/* MAAH10 */
152#define KS_ADD_ADDR_A_HI 0x00D4
153/* MAAL11 */
154#define KS_ADD_ADDR_B_LO 0x00D8
155/* MAAH11 */
156#define KS_ADD_ADDR_B_HI 0x00DC
157/* MAAL12 */
158#define KS_ADD_ADDR_C_LO 0x00E0
159/* MAAH12 */
160#define KS_ADD_ADDR_C_HI 0x00E4
161/* MAAL13 */
162#define KS_ADD_ADDR_D_LO 0x00E8
163/* MAAH13 */
164#define KS_ADD_ADDR_D_HI 0x00EC
165/* MAAL14 */
166#define KS_ADD_ADDR_E_LO 0x00F0
167/* MAAH14 */
168#define KS_ADD_ADDR_E_HI 0x00F4
169/* MAAL15 */
170#define KS_ADD_ADDR_F_LO 0x00F8
171/* MAAH15 */
172#define KS_ADD_ADDR_F_HI 0x00FC
173
174#define ADD_ADDR_HI_MASK 0x0000FFFF
175#define ADD_ADDR_ENABLE 0x80000000
176#define ADD_ADDR_INCR 8
177
178/* Miscellaneous Registers */
179
180/* MARL */
181#define KS884X_ADDR_0_OFFSET 0x0200
182#define KS884X_ADDR_1_OFFSET 0x0201
183/* MARM */
184#define KS884X_ADDR_2_OFFSET 0x0202
185#define KS884X_ADDR_3_OFFSET 0x0203
186/* MARH */
187#define KS884X_ADDR_4_OFFSET 0x0204
188#define KS884X_ADDR_5_OFFSET 0x0205
189
190/* OBCR */
191#define KS884X_BUS_CTRL_OFFSET 0x0210
192
193#define BUS_SPEED_125_MHZ 0x0000
194#define BUS_SPEED_62_5_MHZ 0x0001
195#define BUS_SPEED_41_66_MHZ 0x0002
196#define BUS_SPEED_25_MHZ 0x0003
197
198/* EEPCR */
199#define KS884X_EEPROM_CTRL_OFFSET 0x0212
200
201#define EEPROM_CHIP_SELECT 0x0001
202#define EEPROM_SERIAL_CLOCK 0x0002
203#define EEPROM_DATA_OUT 0x0004
204#define EEPROM_DATA_IN 0x0008
205#define EEPROM_ACCESS_ENABLE 0x0010
206
207/* MBIR */
208#define KS884X_MEM_INFO_OFFSET 0x0214
209
210#define RX_MEM_TEST_FAILED 0x0008
211#define RX_MEM_TEST_FINISHED 0x0010
212#define TX_MEM_TEST_FAILED 0x0800
213#define TX_MEM_TEST_FINISHED 0x1000
214
215/* GCR */
216#define KS884X_GLOBAL_CTRL_OFFSET 0x0216
217#define GLOBAL_SOFTWARE_RESET 0x0001
218
219#define KS8841_POWER_MANAGE_OFFSET 0x0218
220
221/* WFCR */
222#define KS8841_WOL_CTRL_OFFSET 0x021A
223#define KS8841_WOL_MAGIC_ENABLE 0x0080
224#define KS8841_WOL_FRAME3_ENABLE 0x0008
225#define KS8841_WOL_FRAME2_ENABLE 0x0004
226#define KS8841_WOL_FRAME1_ENABLE 0x0002
227#define KS8841_WOL_FRAME0_ENABLE 0x0001
228
229/* WF0 */
230#define KS8841_WOL_FRAME_CRC_OFFSET 0x0220
231#define KS8841_WOL_FRAME_BYTE0_OFFSET 0x0224
232#define KS8841_WOL_FRAME_BYTE2_OFFSET 0x0228
233
234/* IACR */
235#define KS884X_IACR_P 0x04A0
236#define KS884X_IACR_OFFSET KS884X_IACR_P
237
238/* IADR1 */
239#define KS884X_IADR1_P 0x04A2
240#define KS884X_IADR2_P 0x04A4
241#define KS884X_IADR3_P 0x04A6
242#define KS884X_IADR4_P 0x04A8
243#define KS884X_IADR5_P 0x04AA
244
245#define KS884X_ACC_CTRL_SEL_OFFSET KS884X_IACR_P
246#define KS884X_ACC_CTRL_INDEX_OFFSET (KS884X_ACC_CTRL_SEL_OFFSET + 1)
247
248#define KS884X_ACC_DATA_0_OFFSET KS884X_IADR4_P
249#define KS884X_ACC_DATA_1_OFFSET (KS884X_ACC_DATA_0_OFFSET + 1)
250#define KS884X_ACC_DATA_2_OFFSET KS884X_IADR5_P
251#define KS884X_ACC_DATA_3_OFFSET (KS884X_ACC_DATA_2_OFFSET + 1)
252#define KS884X_ACC_DATA_4_OFFSET KS884X_IADR2_P
253#define KS884X_ACC_DATA_5_OFFSET (KS884X_ACC_DATA_4_OFFSET + 1)
254#define KS884X_ACC_DATA_6_OFFSET KS884X_IADR3_P
255#define KS884X_ACC_DATA_7_OFFSET (KS884X_ACC_DATA_6_OFFSET + 1)
256#define KS884X_ACC_DATA_8_OFFSET KS884X_IADR1_P
257
258/* P1MBCR */
259#define KS884X_P1MBCR_P 0x04D0
260#define KS884X_P1MBSR_P 0x04D2
261#define KS884X_PHY1ILR_P 0x04D4
262#define KS884X_PHY1IHR_P 0x04D6
263#define KS884X_P1ANAR_P 0x04D8
264#define KS884X_P1ANLPR_P 0x04DA
265
266/* P2MBCR */
267#define KS884X_P2MBCR_P 0x04E0
268#define KS884X_P2MBSR_P 0x04E2
269#define KS884X_PHY2ILR_P 0x04E4
270#define KS884X_PHY2IHR_P 0x04E6
271#define KS884X_P2ANAR_P 0x04E8
272#define KS884X_P2ANLPR_P 0x04EA
273
274#define KS884X_PHY_1_CTRL_OFFSET KS884X_P1MBCR_P
275#define PHY_CTRL_INTERVAL (KS884X_P2MBCR_P - KS884X_P1MBCR_P)
276
277#define KS884X_PHY_CTRL_OFFSET 0x00
278
279/* Mode Control Register */
280#define PHY_REG_CTRL 0
281
282#define PHY_RESET 0x8000
283#define PHY_LOOPBACK 0x4000
284#define PHY_SPEED_100MBIT 0x2000
285#define PHY_AUTO_NEG_ENABLE 0x1000
286#define PHY_POWER_DOWN 0x0800
287#define PHY_MII_DISABLE 0x0400
288#define PHY_AUTO_NEG_RESTART 0x0200
289#define PHY_FULL_DUPLEX 0x0100
290#define PHY_COLLISION_TEST 0x0080
291#define PHY_HP_MDIX 0x0020
292#define PHY_FORCE_MDIX 0x0010
293#define PHY_AUTO_MDIX_DISABLE 0x0008
294#define PHY_REMOTE_FAULT_DISABLE 0x0004
295#define PHY_TRANSMIT_DISABLE 0x0002
296#define PHY_LED_DISABLE 0x0001
297
298#define KS884X_PHY_STATUS_OFFSET 0x02
299
300/* Mode Status Register */
301#define PHY_REG_STATUS 1
302
303#define PHY_100BT4_CAPABLE 0x8000
304#define PHY_100BTX_FD_CAPABLE 0x4000
305#define PHY_100BTX_CAPABLE 0x2000
306#define PHY_10BT_FD_CAPABLE 0x1000
307#define PHY_10BT_CAPABLE 0x0800
308#define PHY_MII_SUPPRESS_CAPABLE 0x0040
309#define PHY_AUTO_NEG_ACKNOWLEDGE 0x0020
310#define PHY_REMOTE_FAULT 0x0010
311#define PHY_AUTO_NEG_CAPABLE 0x0008
312#define PHY_LINK_STATUS 0x0004
313#define PHY_JABBER_DETECT 0x0002
314#define PHY_EXTENDED_CAPABILITY 0x0001
315
316#define KS884X_PHY_ID_1_OFFSET 0x04
317#define KS884X_PHY_ID_2_OFFSET 0x06
318
319/* PHY Identifier Registers */
320#define PHY_REG_ID_1 2
321#define PHY_REG_ID_2 3
322
323#define KS884X_PHY_AUTO_NEG_OFFSET 0x08
324
325/* Auto-Negotiation Advertisement Register */
326#define PHY_REG_AUTO_NEGOTIATION 4
327
328#define PHY_AUTO_NEG_NEXT_PAGE 0x8000
329#define PHY_AUTO_NEG_REMOTE_FAULT 0x2000
330/* Not supported. */
331#define PHY_AUTO_NEG_ASYM_PAUSE 0x0800
332#define PHY_AUTO_NEG_SYM_PAUSE 0x0400
333#define PHY_AUTO_NEG_100BT4 0x0200
334#define PHY_AUTO_NEG_100BTX_FD 0x0100
335#define PHY_AUTO_NEG_100BTX 0x0080
336#define PHY_AUTO_NEG_10BT_FD 0x0040
337#define PHY_AUTO_NEG_10BT 0x0020
338#define PHY_AUTO_NEG_SELECTOR 0x001F
339#define PHY_AUTO_NEG_802_3 0x0001
340
341#define PHY_AUTO_NEG_PAUSE (PHY_AUTO_NEG_SYM_PAUSE | PHY_AUTO_NEG_ASYM_PAUSE)
342
343#define KS884X_PHY_REMOTE_CAP_OFFSET 0x0A
344
345/* Auto-Negotiation Link Partner Ability Register */
346#define PHY_REG_REMOTE_CAPABILITY 5
347
348#define PHY_REMOTE_NEXT_PAGE 0x8000
349#define PHY_REMOTE_ACKNOWLEDGE 0x4000
350#define PHY_REMOTE_REMOTE_FAULT 0x2000
351#define PHY_REMOTE_SYM_PAUSE 0x0400
352#define PHY_REMOTE_100BTX_FD 0x0100
353#define PHY_REMOTE_100BTX 0x0080
354#define PHY_REMOTE_10BT_FD 0x0040
355#define PHY_REMOTE_10BT 0x0020
356
357/* P1VCT */
358#define KS884X_P1VCT_P 0x04F0
359#define KS884X_P1PHYCTRL_P 0x04F2
360
361/* P2VCT */
362#define KS884X_P2VCT_P 0x04F4
363#define KS884X_P2PHYCTRL_P 0x04F6
364
365#define KS884X_PHY_SPECIAL_OFFSET KS884X_P1VCT_P
366#define PHY_SPECIAL_INTERVAL (KS884X_P2VCT_P - KS884X_P1VCT_P)
367
368#define KS884X_PHY_LINK_MD_OFFSET 0x00
369
370#define PHY_START_CABLE_DIAG 0x8000
371#define PHY_CABLE_DIAG_RESULT 0x6000
372#define PHY_CABLE_STAT_NORMAL 0x0000
373#define PHY_CABLE_STAT_OPEN 0x2000
374#define PHY_CABLE_STAT_SHORT 0x4000
375#define PHY_CABLE_STAT_FAILED 0x6000
376#define PHY_CABLE_10M_SHORT 0x1000
377#define PHY_CABLE_FAULT_COUNTER 0x01FF
378
379#define KS884X_PHY_PHY_CTRL_OFFSET 0x02
380
381#define PHY_STAT_REVERSED_POLARITY 0x0020
382#define PHY_STAT_MDIX 0x0010
383#define PHY_FORCE_LINK 0x0008
384#define PHY_POWER_SAVING_DISABLE 0x0004
385#define PHY_REMOTE_LOOPBACK 0x0002
386
387/* SIDER */
388#define KS884X_SIDER_P 0x0400
389#define KS884X_CHIP_ID_OFFSET KS884X_SIDER_P
390#define KS884X_FAMILY_ID_OFFSET (KS884X_CHIP_ID_OFFSET + 1)
391
392#define REG_FAMILY_ID 0x88
393
394#define REG_CHIP_ID_41 0x8810
395#define REG_CHIP_ID_42 0x8800
396
397#define KS884X_CHIP_ID_MASK_41 0xFF10
398#define KS884X_CHIP_ID_MASK 0xFFF0
399#define KS884X_CHIP_ID_SHIFT 4
400#define KS884X_REVISION_MASK 0x000E
401#define KS884X_REVISION_SHIFT 1
402#define KS8842_START 0x0001
403
404#define CHIP_IP_41_M 0x8810
405#define CHIP_IP_42_M 0x8800
406#define CHIP_IP_61_M 0x8890
407#define CHIP_IP_62_M 0x8880
408
409#define CHIP_IP_41_P 0x8850
410#define CHIP_IP_42_P 0x8840
411#define CHIP_IP_61_P 0x88D0
412#define CHIP_IP_62_P 0x88C0
413
414/* SGCR1 */
415#define KS8842_SGCR1_P 0x0402
416#define KS8842_SWITCH_CTRL_1_OFFSET KS8842_SGCR1_P
417
418#define SWITCH_PASS_ALL 0x8000
419#define SWITCH_TX_FLOW_CTRL 0x2000
420#define SWITCH_RX_FLOW_CTRL 0x1000
421#define SWITCH_CHECK_LENGTH 0x0800
422#define SWITCH_AGING_ENABLE 0x0400
423#define SWITCH_FAST_AGING 0x0200
424#define SWITCH_AGGR_BACKOFF 0x0100
425#define SWITCH_PASS_PAUSE 0x0008
426#define SWITCH_LINK_AUTO_AGING 0x0001
427
428/* SGCR2 */
429#define KS8842_SGCR2_P 0x0404
430#define KS8842_SWITCH_CTRL_2_OFFSET KS8842_SGCR2_P
431
432#define SWITCH_VLAN_ENABLE 0x8000
433#define SWITCH_IGMP_SNOOP 0x4000
434#define IPV6_MLD_SNOOP_ENABLE 0x2000
435#define IPV6_MLD_SNOOP_OPTION 0x1000
436#define PRIORITY_SCHEME_SELECT 0x0800
437#define SWITCH_MIRROR_RX_TX 0x0100
438#define UNICAST_VLAN_BOUNDARY 0x0080
439#define MULTICAST_STORM_DISABLE 0x0040
440#define SWITCH_BACK_PRESSURE 0x0020
441#define FAIR_FLOW_CTRL 0x0010
442#define NO_EXC_COLLISION_DROP 0x0008
443#define SWITCH_HUGE_PACKET 0x0004
444#define SWITCH_LEGAL_PACKET 0x0002
445#define SWITCH_BUF_RESERVE 0x0001
446
447/* SGCR3 */
448#define KS8842_SGCR3_P 0x0406
449#define KS8842_SWITCH_CTRL_3_OFFSET KS8842_SGCR3_P
450
451#define BROADCAST_STORM_RATE_LO 0xFF00
452#define SWITCH_REPEATER 0x0080
453#define SWITCH_HALF_DUPLEX 0x0040
454#define SWITCH_FLOW_CTRL 0x0020
455#define SWITCH_10_MBIT 0x0010
456#define SWITCH_REPLACE_NULL_VID 0x0008
457#define BROADCAST_STORM_RATE_HI 0x0007
458
459#define BROADCAST_STORM_RATE 0x07FF
460
461/* SGCR4 */
462#define KS8842_SGCR4_P 0x0408
463
464/* SGCR5 */
465#define KS8842_SGCR5_P 0x040A
466#define KS8842_SWITCH_CTRL_5_OFFSET KS8842_SGCR5_P
467
468#define LED_MODE 0x8200
469#define LED_SPEED_DUPLEX_ACT 0x0000
470#define LED_SPEED_DUPLEX_LINK_ACT 0x8000
471#define LED_DUPLEX_10_100 0x0200
472
473/* SGCR6 */
474#define KS8842_SGCR6_P 0x0410
475#define KS8842_SWITCH_CTRL_6_OFFSET KS8842_SGCR6_P
476
477#define KS8842_PRIORITY_MASK 3
478#define KS8842_PRIORITY_SHIFT 2
479
480/* SGCR7 */
481#define KS8842_SGCR7_P 0x0412
482#define KS8842_SWITCH_CTRL_7_OFFSET KS8842_SGCR7_P
483
484#define SWITCH_UNK_DEF_PORT_ENABLE 0x0008
485#define SWITCH_UNK_DEF_PORT_3 0x0004
486#define SWITCH_UNK_DEF_PORT_2 0x0002
487#define SWITCH_UNK_DEF_PORT_1 0x0001
488
489/* MACAR1 */
490#define KS8842_MACAR1_P 0x0470
491#define KS8842_MACAR2_P 0x0472
492#define KS8842_MACAR3_P 0x0474
493#define KS8842_MAC_ADDR_1_OFFSET KS8842_MACAR1_P
494#define KS8842_MAC_ADDR_0_OFFSET (KS8842_MAC_ADDR_1_OFFSET + 1)
495#define KS8842_MAC_ADDR_3_OFFSET KS8842_MACAR2_P
496#define KS8842_MAC_ADDR_2_OFFSET (KS8842_MAC_ADDR_3_OFFSET + 1)
497#define KS8842_MAC_ADDR_5_OFFSET KS8842_MACAR3_P
498#define KS8842_MAC_ADDR_4_OFFSET (KS8842_MAC_ADDR_5_OFFSET + 1)
499
500/* TOSR1 */
501#define KS8842_TOSR1_P 0x0480
502#define KS8842_TOSR2_P 0x0482
503#define KS8842_TOSR3_P 0x0484
504#define KS8842_TOSR4_P 0x0486
505#define KS8842_TOSR5_P 0x0488
506#define KS8842_TOSR6_P 0x048A
507#define KS8842_TOSR7_P 0x0490
508#define KS8842_TOSR8_P 0x0492
509#define KS8842_TOS_1_OFFSET KS8842_TOSR1_P
510#define KS8842_TOS_2_OFFSET KS8842_TOSR2_P
511#define KS8842_TOS_3_OFFSET KS8842_TOSR3_P
512#define KS8842_TOS_4_OFFSET KS8842_TOSR4_P
513#define KS8842_TOS_5_OFFSET KS8842_TOSR5_P
514#define KS8842_TOS_6_OFFSET KS8842_TOSR6_P
515
516#define KS8842_TOS_7_OFFSET KS8842_TOSR7_P
517#define KS8842_TOS_8_OFFSET KS8842_TOSR8_P
518
519/* P1CR1 */
520#define KS8842_P1CR1_P 0x0500
521#define KS8842_P1CR2_P 0x0502
522#define KS8842_P1VIDR_P 0x0504
523#define KS8842_P1CR3_P 0x0506
524#define KS8842_P1IRCR_P 0x0508
525#define KS8842_P1ERCR_P 0x050A
526#define KS884X_P1SCSLMD_P 0x0510
527#define KS884X_P1CR4_P 0x0512
528#define KS884X_P1SR_P 0x0514
529
530/* P2CR1 */
531#define KS8842_P2CR1_P 0x0520
532#define KS8842_P2CR2_P 0x0522
533#define KS8842_P2VIDR_P 0x0524
534#define KS8842_P2CR3_P 0x0526
535#define KS8842_P2IRCR_P 0x0528
536#define KS8842_P2ERCR_P 0x052A
537#define KS884X_P2SCSLMD_P 0x0530
538#define KS884X_P2CR4_P 0x0532
539#define KS884X_P2SR_P 0x0534
540
541/* P3CR1 */
542#define KS8842_P3CR1_P 0x0540
543#define KS8842_P3CR2_P 0x0542
544#define KS8842_P3VIDR_P 0x0544
545#define KS8842_P3CR3_P 0x0546
546#define KS8842_P3IRCR_P 0x0548
547#define KS8842_P3ERCR_P 0x054A
548
549#define KS8842_PORT_1_CTRL_1 KS8842_P1CR1_P
550#define KS8842_PORT_2_CTRL_1 KS8842_P2CR1_P
551#define KS8842_PORT_3_CTRL_1 KS8842_P3CR1_P
552
553#define PORT_CTRL_ADDR(port, addr) \
554 (addr = KS8842_PORT_1_CTRL_1 + (port) * \
555 (KS8842_PORT_2_CTRL_1 - KS8842_PORT_1_CTRL_1))
556
557#define KS8842_PORT_CTRL_1_OFFSET 0x00
558
559#define PORT_BROADCAST_STORM 0x0080
560#define PORT_DIFFSERV_ENABLE 0x0040
561#define PORT_802_1P_ENABLE 0x0020
562#define PORT_BASED_PRIORITY_MASK 0x0018
563#define PORT_BASED_PRIORITY_BASE 0x0003
564#define PORT_BASED_PRIORITY_SHIFT 3
565#define PORT_BASED_PRIORITY_0 0x0000
566#define PORT_BASED_PRIORITY_1 0x0008
567#define PORT_BASED_PRIORITY_2 0x0010
568#define PORT_BASED_PRIORITY_3 0x0018
569#define PORT_INSERT_TAG 0x0004
570#define PORT_REMOVE_TAG 0x0002
571#define PORT_PRIO_QUEUE_ENABLE 0x0001
572
573#define KS8842_PORT_CTRL_2_OFFSET 0x02
574
575#define PORT_INGRESS_VLAN_FILTER 0x4000
576#define PORT_DISCARD_NON_VID 0x2000
577#define PORT_FORCE_FLOW_CTRL 0x1000
578#define PORT_BACK_PRESSURE 0x0800
579#define PORT_TX_ENABLE 0x0400
580#define PORT_RX_ENABLE 0x0200
581#define PORT_LEARN_DISABLE 0x0100
582#define PORT_MIRROR_SNIFFER 0x0080
583#define PORT_MIRROR_RX 0x0040
584#define PORT_MIRROR_TX 0x0020
585#define PORT_USER_PRIORITY_CEILING 0x0008
586#define PORT_VLAN_MEMBERSHIP 0x0007
587
588#define KS8842_PORT_CTRL_VID_OFFSET 0x04
589
590#define PORT_DEFAULT_VID 0x0001
591
592#define KS8842_PORT_CTRL_3_OFFSET 0x06
593
594#define PORT_INGRESS_LIMIT_MODE 0x000C
595#define PORT_INGRESS_ALL 0x0000
596#define PORT_INGRESS_UNICAST 0x0004
597#define PORT_INGRESS_MULTICAST 0x0008
598#define PORT_INGRESS_BROADCAST 0x000C
599#define PORT_COUNT_IFG 0x0002
600#define PORT_COUNT_PREAMBLE 0x0001
601
602#define KS8842_PORT_IN_RATE_OFFSET 0x08
603#define KS8842_PORT_OUT_RATE_OFFSET 0x0A
604
605#define PORT_PRIORITY_RATE 0x0F
606#define PORT_PRIORITY_RATE_SHIFT 4
607
608#define KS884X_PORT_LINK_MD 0x10
609
610#define PORT_CABLE_10M_SHORT 0x8000
611#define PORT_CABLE_DIAG_RESULT 0x6000
612#define PORT_CABLE_STAT_NORMAL 0x0000
613#define PORT_CABLE_STAT_OPEN 0x2000
614#define PORT_CABLE_STAT_SHORT 0x4000
615#define PORT_CABLE_STAT_FAILED 0x6000
616#define PORT_START_CABLE_DIAG 0x1000
617#define PORT_FORCE_LINK 0x0800
618#define PORT_POWER_SAVING_DISABLE 0x0400
619#define PORT_PHY_REMOTE_LOOPBACK 0x0200
620#define PORT_CABLE_FAULT_COUNTER 0x01FF
621
622#define KS884X_PORT_CTRL_4_OFFSET 0x12
623
624#define PORT_LED_OFF 0x8000
625#define PORT_TX_DISABLE 0x4000
626#define PORT_AUTO_NEG_RESTART 0x2000
627#define PORT_REMOTE_FAULT_DISABLE 0x1000
628#define PORT_POWER_DOWN 0x0800
629#define PORT_AUTO_MDIX_DISABLE 0x0400
630#define PORT_FORCE_MDIX 0x0200
631#define PORT_LOOPBACK 0x0100
632#define PORT_AUTO_NEG_ENABLE 0x0080
633#define PORT_FORCE_100_MBIT 0x0040
634#define PORT_FORCE_FULL_DUPLEX 0x0020
635#define PORT_AUTO_NEG_SYM_PAUSE 0x0010
636#define PORT_AUTO_NEG_100BTX_FD 0x0008
637#define PORT_AUTO_NEG_100BTX 0x0004
638#define PORT_AUTO_NEG_10BT_FD 0x0002
639#define PORT_AUTO_NEG_10BT 0x0001
640
641#define KS884X_PORT_STATUS_OFFSET 0x14
642
643#define PORT_HP_MDIX 0x8000
644#define PORT_REVERSED_POLARITY 0x2000
645#define PORT_RX_FLOW_CTRL 0x0800
646#define PORT_TX_FLOW_CTRL 0x1000
647#define PORT_STATUS_SPEED_100MBIT 0x0400
648#define PORT_STATUS_FULL_DUPLEX 0x0200
649#define PORT_REMOTE_FAULT 0x0100
650#define PORT_MDIX_STATUS 0x0080
651#define PORT_AUTO_NEG_COMPLETE 0x0040
652#define PORT_STATUS_LINK_GOOD 0x0020
653#define PORT_REMOTE_SYM_PAUSE 0x0010
654#define PORT_REMOTE_100BTX_FD 0x0008
655#define PORT_REMOTE_100BTX 0x0004
656#define PORT_REMOTE_10BT_FD 0x0002
657#define PORT_REMOTE_10BT 0x0001
658
659/*
660#define STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
661#define STATIC_MAC_TABLE_FWD_PORTS 00-00070000-00000000
662#define STATIC_MAC_TABLE_VALID 00-00080000-00000000
663#define STATIC_MAC_TABLE_OVERRIDE 00-00100000-00000000
664#define STATIC_MAC_TABLE_USE_FID 00-00200000-00000000
665#define STATIC_MAC_TABLE_FID 00-03C00000-00000000
666*/
667
668#define STATIC_MAC_TABLE_ADDR 0x0000FFFF
669#define STATIC_MAC_TABLE_FWD_PORTS 0x00070000
670#define STATIC_MAC_TABLE_VALID 0x00080000
671#define STATIC_MAC_TABLE_OVERRIDE 0x00100000
672#define STATIC_MAC_TABLE_USE_FID 0x00200000
673#define STATIC_MAC_TABLE_FID 0x03C00000
674
675#define STATIC_MAC_FWD_PORTS_SHIFT 16
676#define STATIC_MAC_FID_SHIFT 22
677
678/*
679#define VLAN_TABLE_VID 00-00000000-00000FFF
680#define VLAN_TABLE_FID 00-00000000-0000F000
681#define VLAN_TABLE_MEMBERSHIP 00-00000000-00070000
682#define VLAN_TABLE_VALID 00-00000000-00080000
683*/
684
685#define VLAN_TABLE_VID 0x00000FFF
686#define VLAN_TABLE_FID 0x0000F000
687#define VLAN_TABLE_MEMBERSHIP 0x00070000
688#define VLAN_TABLE_VALID 0x00080000
689
690#define VLAN_TABLE_FID_SHIFT 12
691#define VLAN_TABLE_MEMBERSHIP_SHIFT 16
692
693/*
694#define DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
695#define DYNAMIC_MAC_TABLE_FID 00-000F0000-00000000
696#define DYNAMIC_MAC_TABLE_SRC_PORT 00-00300000-00000000
697#define DYNAMIC_MAC_TABLE_TIMESTAMP 00-00C00000-00000000
698#define DYNAMIC_MAC_TABLE_ENTRIES 03-FF000000-00000000
699#define DYNAMIC_MAC_TABLE_MAC_EMPTY 04-00000000-00000000
700#define DYNAMIC_MAC_TABLE_RESERVED 78-00000000-00000000
701#define DYNAMIC_MAC_TABLE_NOT_READY 80-00000000-00000000
702*/
703
704#define DYNAMIC_MAC_TABLE_ADDR 0x0000FFFF
705#define DYNAMIC_MAC_TABLE_FID 0x000F0000
706#define DYNAMIC_MAC_TABLE_SRC_PORT 0x00300000
707#define DYNAMIC_MAC_TABLE_TIMESTAMP 0x00C00000
708#define DYNAMIC_MAC_TABLE_ENTRIES 0xFF000000
709
710#define DYNAMIC_MAC_TABLE_ENTRIES_H 0x03
711#define DYNAMIC_MAC_TABLE_MAC_EMPTY 0x04
712#define DYNAMIC_MAC_TABLE_RESERVED 0x78
713#define DYNAMIC_MAC_TABLE_NOT_READY 0x80
714
715#define DYNAMIC_MAC_FID_SHIFT 16
716#define DYNAMIC_MAC_SRC_PORT_SHIFT 20
717#define DYNAMIC_MAC_TIMESTAMP_SHIFT 22
718#define DYNAMIC_MAC_ENTRIES_SHIFT 24
719#define DYNAMIC_MAC_ENTRIES_H_SHIFT 8
720
721/*
722#define MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
723#define MIB_COUNTER_VALID 00-00000000-40000000
724#define MIB_COUNTER_OVERFLOW 00-00000000-80000000
725*/
726
727#define MIB_COUNTER_VALUE 0x3FFFFFFF
728#define MIB_COUNTER_VALID 0x40000000
729#define MIB_COUNTER_OVERFLOW 0x80000000
730
731#define MIB_PACKET_DROPPED 0x0000FFFF
732
733#define KS_MIB_PACKET_DROPPED_TX_0 0x100
734#define KS_MIB_PACKET_DROPPED_TX_1 0x101
735#define KS_MIB_PACKET_DROPPED_TX 0x102
736#define KS_MIB_PACKET_DROPPED_RX_0 0x103
737#define KS_MIB_PACKET_DROPPED_RX_1 0x104
738#define KS_MIB_PACKET_DROPPED_RX 0x105
739
740/* Change default LED mode. */
741#define SET_DEFAULT_LED LED_SPEED_DUPLEX_ACT
742
743#define MAC_ADDR_LEN 6
744#define MAC_ADDR_ORDER(i) (MAC_ADDR_LEN - 1 - (i))
745
746#define MAX_ETHERNET_BODY_SIZE 1500
747#define ETHERNET_HEADER_SIZE 14
748
749#define MAX_ETHERNET_PACKET_SIZE \
750 (MAX_ETHERNET_BODY_SIZE + ETHERNET_HEADER_SIZE)
751
752#define REGULAR_RX_BUF_SIZE (MAX_ETHERNET_PACKET_SIZE + 4)
753#define MAX_RX_BUF_SIZE (1912 + 4)
754
755#define ADDITIONAL_ENTRIES 16
756#define MAX_MULTICAST_LIST 32
757
758#define HW_MULTICAST_SIZE 8
759
760#define HW_TO_DEV_PORT(port) (port - 1)
761
/* Link state of a port as tracked by the driver. */
enum {
	media_connected,
	media_disconnected
};
766
/*
 * Driver-maintained error counter indices.  OID_COUNTER_FIRST/LAST
 * bracket the valid range for iteration.
 * NOTE(review): the "UNKOWN" spelling is a pre-existing typo in the
 * identifier itself; it is kept because renaming would break users of
 * the constant elsewhere in the driver.
 */
enum {
	OID_COUNTER_UNKOWN,

	OID_COUNTER_FIRST,

	/* total transmit errors */
	OID_COUNTER_XMIT_ERROR,

	/* total receive errors */
	OID_COUNTER_RCV_ERROR,

	OID_COUNTER_LAST
};
780
781/*
782 * Hardware descriptor definitions
783 */
784
785#define DESC_ALIGNMENT 16
786#define BUFFER_ALIGNMENT 8
787
788#define NUM_OF_RX_DESC 64
789#define NUM_OF_TX_DESC 64
790
791#define KS_DESC_RX_FRAME_LEN 0x000007FF
792#define KS_DESC_RX_FRAME_TYPE 0x00008000
793#define KS_DESC_RX_ERROR_CRC 0x00010000
794#define KS_DESC_RX_ERROR_RUNT 0x00020000
795#define KS_DESC_RX_ERROR_TOO_LONG 0x00040000
796#define KS_DESC_RX_ERROR_PHY 0x00080000
797#define KS884X_DESC_RX_PORT_MASK 0x00300000
798#define KS_DESC_RX_MULTICAST 0x01000000
799#define KS_DESC_RX_ERROR 0x02000000
800#define KS_DESC_RX_ERROR_CSUM_UDP 0x04000000
801#define KS_DESC_RX_ERROR_CSUM_TCP 0x08000000
802#define KS_DESC_RX_ERROR_CSUM_IP 0x10000000
803#define KS_DESC_RX_LAST 0x20000000
804#define KS_DESC_RX_FIRST 0x40000000
805#define KS_DESC_RX_ERROR_COND \
806 (KS_DESC_RX_ERROR_CRC | \
807 KS_DESC_RX_ERROR_RUNT | \
808 KS_DESC_RX_ERROR_PHY | \
809 KS_DESC_RX_ERROR_TOO_LONG)
810
811#define KS_DESC_HW_OWNED 0x80000000
812
813#define KS_DESC_BUF_SIZE 0x000007FF
814#define KS884X_DESC_TX_PORT_MASK 0x00300000
815#define KS_DESC_END_OF_RING 0x02000000
816#define KS_DESC_TX_CSUM_GEN_UDP 0x04000000
817#define KS_DESC_TX_CSUM_GEN_TCP 0x08000000
818#define KS_DESC_TX_CSUM_GEN_IP 0x10000000
819#define KS_DESC_TX_LAST 0x20000000
820#define KS_DESC_TX_FIRST 0x40000000
821#define KS_DESC_TX_INTERRUPT 0x80000000
822
823#define KS_DESC_PORT_SHIFT 20
824
825#define KS_DESC_RX_MASK (KS_DESC_BUF_SIZE)
826
827#define KS_DESC_TX_MASK \
828 (KS_DESC_TX_INTERRUPT | \
829 KS_DESC_TX_FIRST | \
830 KS_DESC_TX_LAST | \
831 KS_DESC_TX_CSUM_GEN_IP | \
832 KS_DESC_TX_CSUM_GEN_TCP | \
833 KS_DESC_TX_CSUM_GEN_UDP | \
834 KS_DESC_BUF_SIZE)
835
/*
 * Receive descriptor status word as a bitfield overlay.  The fields
 * correspond to the KS_DESC_RX_* / KS884X_DESC_RX_PORT_MASK /
 * KS_DESC_HW_OWNED bit masks defined above; two orderings are given so
 * the fields land on the same bits for both bitfield endiannesses.
 * This overlays a 32-bit hardware word (see union desc_stat) — do not
 * reorder or resize the fields.
 */
struct ksz_desc_rx_stat {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 hw_owned:1;
	u32 first_desc:1;
	u32 last_desc:1;
	u32 csum_err_ip:1;
	u32 csum_err_tcp:1;
	u32 csum_err_udp:1;
	u32 error:1;
	u32 multicast:1;
	u32 src_port:4;
	u32 err_phy:1;
	u32 err_too_long:1;
	u32 err_runt:1;
	u32 err_crc:1;
	u32 frame_type:1;
	u32 reserved1:4;
	u32 frame_len:11;
#else
	u32 frame_len:11;	/* KS_DESC_RX_FRAME_LEN */
	u32 reserved1:4;
	u32 frame_type:1;	/* KS_DESC_RX_FRAME_TYPE */
	u32 err_crc:1;		/* KS_DESC_RX_ERROR_CRC */
	u32 err_runt:1;		/* KS_DESC_RX_ERROR_RUNT */
	u32 err_too_long:1;	/* KS_DESC_RX_ERROR_TOO_LONG */
	u32 err_phy:1;		/* KS_DESC_RX_ERROR_PHY */
	u32 src_port:4;		/* KS884X_DESC_RX_PORT_MASK */
	u32 multicast:1;	/* KS_DESC_RX_MULTICAST */
	u32 error:1;		/* KS_DESC_RX_ERROR */
	u32 csum_err_udp:1;	/* KS_DESC_RX_ERROR_CSUM_UDP */
	u32 csum_err_tcp:1;	/* KS_DESC_RX_ERROR_CSUM_TCP */
	u32 csum_err_ip:1;	/* KS_DESC_RX_ERROR_CSUM_IP */
	u32 last_desc:1;	/* KS_DESC_RX_LAST */
	u32 first_desc:1;	/* KS_DESC_RX_FIRST */
	u32 hw_owned:1;		/* KS_DESC_HW_OWNED */
#endif
};
873
/*
 * Transmit descriptor status word overlay.  Only the ownership bit
 * (KS_DESC_HW_OWNED, bit 31) is meaningful; the rest is reserved.
 */
struct ksz_desc_tx_stat {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 hw_owned:1;
	u32 reserved1:31;
#else
	u32 reserved1:31;
	u32 hw_owned:1;		/* KS_DESC_HW_OWNED */
#endif
};
883
/*
 * Receive descriptor buffer control word overlay.  Fields correspond
 * to the KS_DESC_BUF_SIZE and KS_DESC_END_OF_RING masks above.
 */
struct ksz_desc_rx_buf {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 reserved4:6;
	u32 end_of_ring:1;
	u32 reserved3:14;
	u32 buf_size:11;
#else
	u32 buf_size:11;	/* KS_DESC_BUF_SIZE */
	u32 reserved3:14;
	u32 end_of_ring:1;	/* KS_DESC_END_OF_RING */
	u32 reserved4:6;
#endif
};
897
/*
 * Transmit descriptor buffer control word overlay.  Fields correspond
 * to the KS_DESC_TX_* / KS884X_DESC_TX_PORT_MASK / KS_DESC_BUF_SIZE
 * masks above; do not reorder or resize.
 */
struct ksz_desc_tx_buf {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 intr:1;
	u32 first_seg:1;
	u32 last_seg:1;
	u32 csum_gen_ip:1;
	u32 csum_gen_tcp:1;
	u32 csum_gen_udp:1;
	u32 end_of_ring:1;
	u32 reserved4:1;
	u32 dest_port:4;
	u32 reserved3:9;
	u32 buf_size:11;
#else
	u32 buf_size:11;	/* KS_DESC_BUF_SIZE */
	u32 reserved3:9;
	u32 dest_port:4;	/* KS884X_DESC_TX_PORT_MASK */
	u32 reserved4:1;
	u32 end_of_ring:1;	/* KS_DESC_END_OF_RING */
	u32 csum_gen_udp:1;	/* KS_DESC_TX_CSUM_GEN_UDP */
	u32 csum_gen_tcp:1;	/* KS_DESC_TX_CSUM_GEN_TCP */
	u32 csum_gen_ip:1;	/* KS_DESC_TX_CSUM_GEN_IP */
	u32 last_seg:1;		/* KS_DESC_TX_LAST */
	u32 first_seg:1;	/* KS_DESC_TX_FIRST */
	u32 intr:1;		/* KS_DESC_TX_INTERRUPT */
#endif
};
925
/*
 * Descriptor status word: RX/TX bitfield views plus a raw 32-bit view
 * (@data) for whole-word reads and writes.
 */
union desc_stat {
	struct ksz_desc_rx_stat rx;
	struct ksz_desc_tx_stat tx;
	u32 data;
};
931
/*
 * Descriptor buffer control word: RX/TX bitfield views plus a raw
 * 32-bit view (@data) for whole-word reads and writes.
 */
union desc_buf {
	struct ksz_desc_rx_buf rx;
	struct ksz_desc_tx_buf tx;
	u32 data;
};
937
/**
 * struct ksz_hw_desc - Hardware descriptor data structure
 * @ctrl: Descriptor control value.
 * @buf: Descriptor buffer value.
 * @addr: Physical address of memory buffer.
 * @next: Pointer to next hardware descriptor.
 *
 * This layout is shared with the DMA engine; the member order and
 * sizes must not change.
 */
struct ksz_hw_desc {
	union desc_stat ctrl;
	union desc_buf buf;
	u32 addr;
	u32 next;
};
951
/**
 * struct ksz_sw_desc - Software descriptor data structure
 * @ctrl: Descriptor control value.
 * @buf: Descriptor buffer value.
 * @buf_size: Current buffers size value in hardware descriptor.
 *
 * Cached (software-only) copy of the hardware descriptor words, used
 * to build values before handing them to the hardware descriptor.
 */
struct ksz_sw_desc {
	union desc_stat ctrl;
	union desc_buf buf;
	u32 buf_size;
};
963
/**
 * struct ksz_dma_buf - OS dependent DMA buffer data structure
 * @skb: Associated socket buffer.
 * @dma: Associated physical DMA address.
 * @len: Actual length used.
 */
struct ksz_dma_buf {
	struct sk_buff *skb;
	dma_addr_t dma;
	int len;
};
975
/**
 * struct ksz_desc - Descriptor structure
 * @phw: Hardware descriptor pointer to uncached physical memory.
 * @sw: Cached memory to hold hardware descriptor values for
 * 	manipulation.
 * @dma_buf: Operating system dependent data structure to hold physical
 * 	memory buffer allocation information.
 */
struct ksz_desc {
	struct ksz_hw_desc *phw;
	struct ksz_sw_desc sw;
	struct ksz_dma_buf dma_buf;
};
989
990#define DMA_BUFFER(desc) ((struct ksz_dma_buf *)(&(desc)->dma_buf))
991
/**
 * struct ksz_desc_info - Descriptor information data structure
 * @ring: First descriptor in the ring.
 * @cur: Current descriptor being manipulated.
 * @ring_virt: First hardware descriptor in the ring.
 * @ring_phys: The physical address of the first descriptor of the ring.
 * @size: Size of hardware descriptor.
 * @alloc: Number of descriptors allocated.
 * @avail: Number of descriptors available for use.
 * @last: Index for last descriptor released to hardware.
 * @next: Index for next descriptor available for use.
 * @mask: Mask for index wrapping.
 *
 * One instance exists per direction (see @rx_desc_info/@tx_desc_info
 * in struct ksz_hw).
 */
struct ksz_desc_info {
	struct ksz_desc *ring;
	struct ksz_desc *cur;
	struct ksz_hw_desc *ring_virt;
	u32 ring_phys;
	int size;
	int alloc;
	int avail;
	int last;
	int next;
	int mask;
};
1017
1018/*
1019 * KSZ8842 switch definitions
1020 */
1021
/*
 * Switch table selectors.  NOTE(review): presumably used with the
 * indirect access control register (KS884X_IACR_OFFSET above) to pick
 * which table to read/write — confirm against the access routines.
 */
enum {
	TABLE_STATIC_MAC = 0,
	TABLE_VLAN,
	TABLE_DYNAMIC_MAC,
	TABLE_MIB
};
1028
1029#define LEARNED_MAC_TABLE_ENTRIES 1024
1030#define STATIC_MAC_TABLE_ENTRIES 8
1031
/**
 * struct ksz_mac_table - Static MAC table data structure
 * @mac_addr: MAC address to filter.
 * @vid: VID value.
 * @fid: FID value.
 * @ports: Port membership.
 * @override: Override setting.
 * @use_fid: FID use setting.
 * @valid: Valid setting indicating the entry is being used.
 *
 * Software shadow of one static MAC table entry (cf. the
 * STATIC_MAC_TABLE_* field masks above).
 */
struct ksz_mac_table {
	u8 mac_addr[MAC_ADDR_LEN];
	u16 vid;
	u8 fid;
	u8 ports;
	u8 override:1;
	u8 use_fid:1;
	u8 valid:1;
};
1051
1052#define VLAN_TABLE_ENTRIES 16
1053
/**
 * struct ksz_vlan_table - VLAN table data structure
 * @vid: VID value.
 * @fid: FID value.
 * @member: Port membership.
 *
 * Software shadow of one VLAN table entry (cf. the VLAN_TABLE_* field
 * masks above).
 */
struct ksz_vlan_table {
	u16 vid;
	u8 fid;
	u8 member;
};
1065
1066#define DIFFSERV_ENTRIES 64
1067#define PRIO_802_1P_ENTRIES 8
1068#define PRIO_QUEUES 4
1069
1070#define SWITCH_PORT_NUM 2
1071#define TOTAL_PORT_NUM (SWITCH_PORT_NUM + 1)
1072#define HOST_MASK (1 << SWITCH_PORT_NUM)
1073#define PORT_MASK 7
1074
1075#define MAIN_PORT 0
1076#define OTHER_PORT 1
1077#define HOST_PORT SWITCH_PORT_NUM
1078
1079#define PORT_COUNTER_NUM 0x20
1080#define TOTAL_PORT_COUNTER_NUM (PORT_COUNTER_NUM + 2)
1081
1082#define MIB_COUNTER_RX_LO_PRIORITY 0x00
1083#define MIB_COUNTER_RX_HI_PRIORITY 0x01
1084#define MIB_COUNTER_RX_UNDERSIZE 0x02
1085#define MIB_COUNTER_RX_FRAGMENT 0x03
1086#define MIB_COUNTER_RX_OVERSIZE 0x04
1087#define MIB_COUNTER_RX_JABBER 0x05
1088#define MIB_COUNTER_RX_SYMBOL_ERR 0x06
1089#define MIB_COUNTER_RX_CRC_ERR 0x07
1090#define MIB_COUNTER_RX_ALIGNMENT_ERR 0x08
1091#define MIB_COUNTER_RX_CTRL_8808 0x09
1092#define MIB_COUNTER_RX_PAUSE 0x0A
1093#define MIB_COUNTER_RX_BROADCAST 0x0B
1094#define MIB_COUNTER_RX_MULTICAST 0x0C
1095#define MIB_COUNTER_RX_UNICAST 0x0D
1096#define MIB_COUNTER_RX_OCTET_64 0x0E
1097#define MIB_COUNTER_RX_OCTET_65_127 0x0F
1098#define MIB_COUNTER_RX_OCTET_128_255 0x10
1099#define MIB_COUNTER_RX_OCTET_256_511 0x11
1100#define MIB_COUNTER_RX_OCTET_512_1023 0x12
1101#define MIB_COUNTER_RX_OCTET_1024_1522 0x13
1102#define MIB_COUNTER_TX_LO_PRIORITY 0x14
1103#define MIB_COUNTER_TX_HI_PRIORITY 0x15
1104#define MIB_COUNTER_TX_LATE_COLLISION 0x16
1105#define MIB_COUNTER_TX_PAUSE 0x17
1106#define MIB_COUNTER_TX_BROADCAST 0x18
1107#define MIB_COUNTER_TX_MULTICAST 0x19
1108#define MIB_COUNTER_TX_UNICAST 0x1A
1109#define MIB_COUNTER_TX_DEFERRED 0x1B
1110#define MIB_COUNTER_TX_TOTAL_COLLISION 0x1C
1111#define MIB_COUNTER_TX_EXCESS_COLLISION 0x1D
1112#define MIB_COUNTER_TX_SINGLE_COLLISION 0x1E
1113#define MIB_COUNTER_TX_MULTI_COLLISION 0x1F
1114
1115#define MIB_COUNTER_RX_DROPPED_PACKET 0x20
1116#define MIB_COUNTER_TX_DROPPED_PACKET 0x21
1117
/**
 * struct ksz_port_mib - Port MIB data structure
 * @cnt_ptr: Current pointer to MIB counter index.
 * @link_down: Indication the link has just gone down.
 * @state: Connection status of the port.
 * @mib_start: The starting counter index. Some ports do not start at 0.
 * @counter: 64-bit MIB counter value.
 * @dropped: Temporary buffer to remember last read packet dropped values.
 *
 * MIB counters needs to be read periodically so that counters do not get
 * overflowed and give incorrect values. A right balance is needed to
 * satisfy this condition and not waste too much CPU time.
 *
 * It is pointless to read MIB counters when the port is disconnected. The
 * @state provides the connection status so that MIB counters are read only
 * when the port is connected. The @link_down indicates the port is just
 * disconnected so that all MIB counters are read one last time to update the
 * information.
 */
struct ksz_port_mib {
	u8 cnt_ptr;
	u8 link_down;
	u8 state;
	u8 mib_start;

	/* Indexed by the MIB_COUNTER_* values defined above. */
	u64 counter[TOTAL_PORT_COUNTER_NUM];
	u32 dropped[2];
};
1146
/**
 * struct ksz_port_cfg - Port configuration data structure
 * @vid: VID value.
 * @member: Port membership.
 * @port_prio: Port priority.
 * @rx_rate: Receive priority rate.
 * @tx_rate: Transmit priority rate.
 * @stp_state: Current Spanning Tree Protocol state.
 *
 * Per-port software copy of the switch port configuration; one rate
 * entry per priority queue (PRIO_QUEUES).
 */
struct ksz_port_cfg {
	u16 vid;
	u8 member;
	u8 port_prio;
	u32 rx_rate[PRIO_QUEUES];
	u32 tx_rate[PRIO_QUEUES];
	int stp_state;
};
1164
/**
 * struct ksz_switch - KSZ8842 switch data structure
 * @mac_table: MAC table entries information.
 * @vlan_table: VLAN table entries information.
 * @port_cfg: Port configuration information.
 * @diffserv: DiffServ priority settings. Possible values from 6-bit of ToS
 * 	(bit7 ~ bit2) field.
 * @p_802_1p: 802.1P priority settings. Possible values from 3-bit of 802.1p
 * 	Tag priority field.
 * @br_addr: Bridge address. Used for STP.
 * @other_addr: Other MAC address. Used for multiple network device mode.
 * @broad_per: Broadcast storm percentage.
 * @member: Current port membership. Used for STP.
 */
struct ksz_switch {
	struct ksz_mac_table mac_table[STATIC_MAC_TABLE_ENTRIES];
	struct ksz_vlan_table vlan_table[VLAN_TABLE_ENTRIES];
	struct ksz_port_cfg port_cfg[TOTAL_PORT_NUM];

	u8 diffserv[DIFFSERV_ENTRIES];
	u8 p_802_1p[PRIO_802_1P_ENTRIES];

	u8 br_addr[MAC_ADDR_LEN];
	u8 other_addr[MAC_ADDR_LEN];

	u8 broad_per;
	u8 member;
};
1193
1194#define TX_RATE_UNIT 10000
1195
/**
 * struct ksz_port_info - Port information data structure
 * @state: Connection status of the port (media_connected or
 * 	media_disconnected).
 * @tx_rate: Transmit rate divided by 10000 to get Mbit.
 * @duplex: Duplex mode.
 * @advertised: Advertised auto-negotiation setting. Used to determine link.
 * @partner: Auto-negotiation partner setting. Used to determine link.
 * @port_id: Port index to access actual hardware register.
 * @pdev: Pointer to OS dependent network device.
 */
struct ksz_port_info {
	uint state;
	uint tx_rate;
	u8 duplex;
	u8 advertised;
	u8 partner;
	u8 port_id;
	void *pdev;
};
1215
1216#define MAX_TX_HELD_SIZE 52000
1217
1218/* Hardware features and bug fixes. */
1219#define LINK_INT_WORKING (1 << 0)
1220#define SMALL_PACKET_TX_BUG (1 << 1)
1221#define HALF_DUPLEX_SIGNAL_BUG (1 << 2)
1222#define IPV6_CSUM_GEN_HACK (1 << 3)
1223#define RX_HUGE_FRAME (1 << 4)
1224#define STP_SUPPORT (1 << 8)
1225
1226/* Software overrides. */
1227#define PAUSE_FLOW_CTRL (1 << 0)
1228#define FAST_AGING (1 << 1)
1229
/**
 * struct ksz_hw - KSZ884X hardware data structure
 * @io: Virtual address assigned.
 * @ksz_switch: Pointer to KSZ8842 switch.
 * @port_info: Port information.
 * @port_mib: Port MIB information.
 * @dev_count: Number of network devices this hardware supports.
 * @dst_ports: Destination ports in switch for transmission.
 * @id: Hardware ID. Used for display only.
 * @mib_cnt: Number of MIB counters this hardware has.
 * @mib_port_cnt: Number of ports with MIB counters.
 * @tx_cfg: Cached transmit control settings.
 * @rx_cfg: Cached receive control settings.
 * @intr_mask: Current interrupt mask.
 * @intr_set: Current interrupt set.
 * @intr_blocked: Interrupt blocked.
 * @rx_desc_info: Receive descriptor information.
 * @tx_desc_info: Transmit descriptor information.
 * @tx_int_cnt: Transmit interrupt count. Used for TX optimization.
 * @tx_int_mask: Transmit interrupt mask. Used for TX optimization.
 * @tx_size: Transmit data size. Used for TX optimization.
 * 	The maximum is defined by MAX_TX_HELD_SIZE.
 * @perm_addr: Permanent MAC address.
 * @override_addr: Overridden MAC address.
 * @address: Additional MAC address entries.
 * @addr_list_size: Additional MAC address list size.
 * @mac_override: Indication of MAC address overridden.
 * @promiscuous: Counter to keep track of promiscuous mode set.
 * @all_multi: Counter to keep track of all multicast mode set.
 * @multi_list: Multicast address entries.
 * @multi_bits: Cached multicast hash table settings.
 * @multi_list_size: Multicast address list size.
 * @enabled: Indication of hardware enabled.
 * @rx_stop: Indication of receive process stop.
 * @features: Hardware features to enable.
 * @overrides: Hardware features to override.
 * @parent: Pointer to parent, network device private structure.
 */
struct ksz_hw {
	void __iomem *io;

	struct ksz_switch *ksz_switch;
	struct ksz_port_info port_info[SWITCH_PORT_NUM];
	struct ksz_port_mib port_mib[TOTAL_PORT_NUM];
	int dev_count;
	int dst_ports;
	int id;
	int mib_cnt;
	int mib_port_cnt;

	u32 tx_cfg;
	u32 rx_cfg;
	u32 intr_mask;
	u32 intr_set;
	uint intr_blocked;

	struct ksz_desc_info rx_desc_info;
	struct ksz_desc_info tx_desc_info;

	int tx_int_cnt;
	int tx_int_mask;
	int tx_size;

	u8 perm_addr[MAC_ADDR_LEN];
	u8 override_addr[MAC_ADDR_LEN];
	u8 address[ADDITIONAL_ENTRIES][MAC_ADDR_LEN];
	u8 addr_list_size;
	u8 mac_override;
	u8 promiscuous;
	u8 all_multi;
	u8 multi_list[MAX_MULTICAST_LIST][MAC_ADDR_LEN];
	u8 multi_bits[HW_MULTICAST_SIZE];
	u8 multi_list_size;

	u8 enabled;
	u8 rx_stop;
	u8 reserved2[1];

	uint features;
	uint overrides;

	void *parent;
};
1313
/* PHY flow control settings; stored in ksz_port.flow_ctrl. */
enum {
	PHY_NO_FLOW_CTRL,
	PHY_FLOW_CTRL,
	PHY_TX_ONLY,
	PHY_RX_ONLY
};
1320
/**
 * struct ksz_port - Virtual port data structure
 * @duplex: Duplex mode setting. 1 for half duplex, 2 for full
 * duplex, and 0 for auto, which normally results in full
 * duplex.
 * @speed: Speed setting. 10 for 10 Mbit, 100 for 100 Mbit, and
 * 0 for auto, which normally results in 100 Mbit.
 * @force_link: Force link setting. 0 for auto-negotiation, and 1 for
 * force.
 * @flow_ctrl: Flow control setting. PHY_NO_FLOW_CTRL for no flow
 * control, and PHY_FLOW_CTRL for flow control.
 * PHY_TX_ONLY and PHY_RX_ONLY are not supported for 100
 * Mbit PHY.
 * @first_port: Index of first port this port supports.
 * @mib_port_cnt: Number of ports with MIB counters.
 * @port_cnt: Number of ports this port supports.
 * @counter: Port statistics counters, indexed by OID.
 * @hw: Pointer to hardware structure.
 * @linked: Pointer to port information linked to this port.
 */
struct ksz_port {
	u8 duplex;
	u8 speed;
	u8 force_link;
	u8 flow_ctrl;

	int first_port;
	int mib_port_cnt;
	int port_cnt;
	u64 counter[OID_COUNTER_LAST];

	struct ksz_hw *hw;
	struct ksz_port_info *linked;
};
1355
/**
 * struct ksz_timer_info - Timer information data structure
 * @timer: Kernel timer.
 * @cnt: Running timer counter.
 * @max: Maximum number of times to run timer; -1 for infinity.
 * @period: Timer period in jiffies.
 */
struct ksz_timer_info {
	struct timer_list timer;
	int cnt;
	int max;
	int period;
};
1369
/**
 * struct ksz_shared_mem - OS dependent shared memory data structure
 * @dma_addr: Physical DMA address allocated.
 * @alloc_size: Allocation size.
 * @phys: Actual physical address used.
 * @alloc_virt: Virtual address allocated.
 * @virt: Actual virtual address used.
 *
 * Tracks both the raw allocation (alloc_*) and the possibly adjusted
 * addresses actually handed to the hardware.  Used for the descriptor
 * pool (see struct dev_info).
 */
struct ksz_shared_mem {
	dma_addr_t dma_addr;
	uint alloc_size;
	uint phys;
	u8 *alloc_virt;
	u8 *virt;
};
1385
/**
 * struct ksz_counter_info - OS dependent counter information data structure
 * @counter: Wait queue to wakeup after counters are read.
 * @time: Next time in jiffies to read counter.
 * @read: Indication of counters read in full or not.
 *
 * One instance exists per port (see struct dev_info) to coordinate the
 * periodic MIB counter reads.
 */
struct ksz_counter_info {
	wait_queue_head_t counter;
	unsigned long time;
	int read;
};
1397
/**
 * struct dev_info - Network device information data structure
 * @dev: Pointer to network device.
 * @pdev: Pointer to PCI device.
 * @hw: Hardware structure.
 * @desc_pool: Physical memory used for descriptor pool.
 * @hwlock: Spinlock serializing hardware register access.
 * @lock: Mutex serializing device operations.
 * @dev_rcv: Receive process function used.
 * @last_skb: Socket buffer allocated for descriptor rx fragments.
 * @skb_index: Buffer index for receiving fragments.
 * @skb_len: Buffer length for receiving fragments.
 * @mib_read: Work item to read MIB counters.
 * @mib_timer_info: Timer to read MIB counters.
 * @counter: Used for MIB reading.
 * @mtu: Current MTU used. The default is REGULAR_RX_BUF_SIZE;
 * the maximum is MAX_RX_BUF_SIZE.
 * @opened: Counter to keep track of device open.
 * @rx_tasklet: Receive processing tasklet.
 * @tx_tasklet: Transmit processing tasklet.
 * @wol_enable: Wake-on-LAN enable set by ethtool.
 * @wol_support: Wake-on-LAN support used by ethtool.
 * @pme_wait: Used for KSZ8841 power management.
 */
struct dev_info {
	struct net_device *dev;
	struct pci_dev *pdev;

	struct ksz_hw hw;
	struct ksz_shared_mem desc_pool;

	spinlock_t hwlock;
	struct mutex lock;

	int (*dev_rcv)(struct dev_info *);

	struct sk_buff *last_skb;
	int skb_index;
	int skb_len;

	struct work_struct mib_read;
	struct ksz_timer_info mib_timer_info;
	struct ksz_counter_info counter[TOTAL_PORT_NUM];

	int mtu;
	int opened;

	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;

	int wol_enable;
	int wol_support;
	unsigned long pme_wait;
};
1452
/**
 * struct dev_priv - Network device private data structure
 * @adapter: Adapter device information.
 * @port: Port information.
 * @monitor_timer_info: Timer to monitor ports.
 * @stats: Network statistics.
 * @proc_sem: Semaphore for proc accessing.
 * @id: Device ID.
 * @mii_if: MII interface information.
 * @advertising: Temporary variable to store advertised settings.
 * @msg_enable: The message flags controlling driver output.
 * @media_state: The connection status of the device.
 * @multicast: The all multicast state of the device.
 * @promiscuous: The promiscuous state of the device.
 */
struct dev_priv {
	struct dev_info *adapter;
	struct ksz_port port;
	struct ksz_timer_info monitor_timer_info;
	struct net_device_stats stats;

	struct semaphore proc_sem;
	int id;

	struct mii_if_info mii_if;
	u32 advertising;

	u32 msg_enable;
	int media_state;
	int multicast;
	int promiscuous;
};
1485
/* Logging helpers routed through the adapter's underlying PCI device. */
#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)

#define DRV_NAME "KSZ884X PCI"
#define DEVICE_NAME "KSZ884x PCI"
#define DRV_VERSION "1.0.0"
#define DRV_RELDATE "Feb 8, 2010"

/* Driver banner string (__devinitdata: discarded after device init). */
static char version[] __devinitdata =
	"Micrel " DEVICE_NAME " " DRV_VERSION " (" DRV_RELDATE ")";

/*
 * Default MAC address -- presumably used when the hardware provides none;
 * verify against the probe code (not visible in this chunk).
 */
static u8 DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x88, 0x42, 0x01 };
1500
/*
 * Interrupt processing primary routines
 */

/* Acknowledge (clear) the given bits in the interrupt status register. */
static inline void hw_ack_intr(struct ksz_hw *hw, uint interrupt)
{
	writel(interrupt, hw->io + KS884X_INTERRUPTS_STATUS);
}

/*
 * Disable all hardware interrupts.  The previous mask is saved in
 * intr_blocked; intr_set is refreshed from the (now zeroed) enable
 * register.
 */
static inline void hw_dis_intr(struct ksz_hw *hw)
{
	hw->intr_blocked = hw->intr_mask;
	writel(0, hw->io + KS884X_INTERRUPTS_ENABLE);
	hw->intr_set = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
}

/* Program the interrupt enable register and cache the value in intr_set. */
static inline void hw_set_intr(struct ksz_hw *hw, uint interrupt)
{
	hw->intr_set = interrupt;
	writel(interrupt, hw->io + KS884X_INTERRUPTS_ENABLE);
}

/* Re-enable interrupts using the current cached interrupt mask. */
static inline void hw_ena_intr(struct ksz_hw *hw)
{
	hw->intr_blocked = 0;
	hw_set_intr(hw, hw->intr_mask);
}

/* Remove a bit from the cached interrupt mask; hardware is not touched. */
static inline void hw_dis_intr_bit(struct ksz_hw *hw, uint bit)
{
	hw->intr_mask &= ~(bit);
}
1533
1534static inline void hw_turn_off_intr(struct ksz_hw *hw, uint interrupt)
1535{
1536 u32 read_intr;
1537
1538 read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
1539 hw->intr_set = read_intr & ~interrupt;
1540 writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
1541 hw_dis_intr_bit(hw, interrupt);
1542}
1543
1544/**
1545 * hw_turn_on_intr - turn on specified interrupts
1546 * @hw: The hardware instance.
1547 * @bit: The interrupt bits to be on.
1548 *
1549 * This routine turns on the specified interrupts in the interrupt mask so that
1550 * those interrupts will be enabled.
1551 */
1552static void hw_turn_on_intr(struct ksz_hw *hw, u32 bit)
1553{
1554 hw->intr_mask |= bit;
1555
1556 if (!hw->intr_blocked)
1557 hw_set_intr(hw, hw->intr_mask);
1558}
1559
/* Set additional bits directly in the interrupt enable register. */
static inline void hw_ena_intr_bit(struct ksz_hw *hw, uint interrupt)
{
	u32 read_intr;

	read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
	hw->intr_set = read_intr | interrupt;
	writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
}

/* Read the interrupt status, masked by the currently enabled interrupts. */
static inline void hw_read_intr(struct ksz_hw *hw, uint *status)
{
	*status = readl(hw->io + KS884X_INTERRUPTS_STATUS);
	*status = *status & hw->intr_set;
}

/*
 * Undo hw_block_intr().  A zero @interrupt means interrupts were already
 * blocked at the time of hw_block_intr(), so they are left alone.
 */
static inline void hw_restore_intr(struct ksz_hw *hw, uint interrupt)
{
	if (interrupt)
		hw_ena_intr(hw);
}
1580
/**
 * hw_block_intr - block hardware interrupts
 * @hw: The hardware instance.
 *
 * This function blocks all interrupts of the hardware and returns the current
 * interrupt enable mask so that interrupts can be restored later.
 *
 * Return the current interrupt enable mask, or 0 when interrupts were
 * already blocked (hw_restore_intr() then leaves them blocked).
 */
static uint hw_block_intr(struct ksz_hw *hw)
{
	uint interrupt = 0;

	if (!hw->intr_blocked) {
		hw_dis_intr(hw);
		interrupt = hw->intr_blocked;
	}
	return interrupt;
}
1599
/*
 * Hardware descriptor routines
 */

/*
 * reset_desc - return a descriptor to software ownership
 * Clears the hw_owned bit in @status and writes the resulting control
 * word to the physical descriptor in little-endian order.
 */
static inline void reset_desc(struct ksz_desc *desc, union desc_stat status)
{
	status.rx.hw_owned = 0;
	desc->phw->ctrl.data = cpu_to_le32(status.data);
}

/*
 * release_desc - hand a descriptor to the hardware
 * Sets the hw_owned bit in the software copy, writes the buffer word to
 * the physical descriptor only when it changed, and writes the control
 * word (containing hw_owned) last.
 */
static inline void release_desc(struct ksz_desc *desc)
{
	desc->sw.ctrl.tx.hw_owned = 1;
	if (desc->sw.buf_size != desc->sw.buf.data) {
		desc->sw.buf_size = desc->sw.buf.data;
		desc->phw->buf.data = cpu_to_le32(desc->sw.buf.data);
	}
	desc->phw->ctrl.data = cpu_to_le32(desc->sw.ctrl.data);
}
1619
/*
 * get_rx_pkt - take the next receive descriptor from the ring
 * Advances the "last" index (wrapping via the ring mask), consumes one
 * available slot, and clears the RX control bits of the descriptor.
 */
static void get_rx_pkt(struct ksz_desc_info *info, struct ksz_desc **desc)
{
	*desc = &info->ring[info->last];
	info->last++;
	info->last &= info->mask;
	info->avail--;
	(*desc)->sw.buf.data &= ~KS_DESC_RX_MASK;
}

/* Program the DMA buffer address into the physical descriptor. */
static inline void set_rx_buf(struct ksz_desc *desc, u32 addr)
{
	desc->phw->addr = cpu_to_le32(addr);
}

/* Record the receive buffer size in the software copy of the descriptor. */
static inline void set_rx_len(struct ksz_desc *desc, u32 len)
{
	desc->sw.buf.rx.buf_size = len;
}

/*
 * get_tx_pkt - take the next transmit descriptor from the ring
 * Advances the "next" index (wrapping via the ring mask), consumes one
 * available slot, and clears the TX control bits of the descriptor.
 */
static inline void get_tx_pkt(struct ksz_desc_info *info,
	struct ksz_desc **desc)
{
	*desc = &info->ring[info->next];
	info->next++;
	info->next &= info->mask;
	info->avail--;
	(*desc)->sw.buf.data &= ~KS_DESC_TX_MASK;
}

/* Program the DMA buffer address into the physical descriptor. */
static inline void set_tx_buf(struct ksz_desc *desc, u32 addr)
{
	desc->phw->addr = cpu_to_le32(addr);
}

/* Record the transmit buffer size in the software copy of the descriptor. */
static inline void set_tx_len(struct ksz_desc *desc, u32 len)
{
	desc->sw.buf.tx.buf_size = len;
}
1658
/* Switch functions */

#define TABLE_READ 0x10
#define TABLE_SEL_SHIFT 2

/*
 * HW_DELAY - read a register back to let the preceding write settle.
 *
 * The value returned by readw() is intentionally discarded; only the read
 * cycle itself matters.  Storing it in a local variable (as the previous
 * version did) serves no purpose and triggers "set but not used" compiler
 * warnings.
 */
#define HW_DELAY(hw, reg)			\
	do {					\
		readw(hw->io + reg);		\
	} while (0)
1669
1670/**
1671 * sw_r_table - read 4 bytes of data from switch table
1672 * @hw: The hardware instance.
1673 * @table: The table selector.
1674 * @addr: The address of the table entry.
1675 * @data: Buffer to store the read data.
1676 *
1677 * This routine reads 4 bytes of data from the table of the switch.
1678 * Hardware interrupts are disabled to minimize corruption of read data.
1679 */
1680static void sw_r_table(struct ksz_hw *hw, int table, u16 addr, u32 *data)
1681{
1682 u16 ctrl_addr;
1683 uint interrupt;
1684
1685 ctrl_addr = (((table << TABLE_SEL_SHIFT) | TABLE_READ) << 8) | addr;
1686
1687 interrupt = hw_block_intr(hw);
1688
1689 writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
1690 HW_DELAY(hw, KS884X_IACR_OFFSET);
1691 *data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
1692
1693 hw_restore_intr(hw, interrupt);
1694}
1695
1696/**
1697 * sw_w_table_64 - write 8 bytes of data to the switch table
1698 * @hw: The hardware instance.
1699 * @table: The table selector.
1700 * @addr: The address of the table entry.
1701 * @data_hi: The high part of data to be written (bit63 ~ bit32).
1702 * @data_lo: The low part of data to be written (bit31 ~ bit0).
1703 *
1704 * This routine writes 8 bytes of data to the table of the switch.
1705 * Hardware interrupts are disabled to minimize corruption of written data.
1706 */
1707static void sw_w_table_64(struct ksz_hw *hw, int table, u16 addr, u32 data_hi,
1708 u32 data_lo)
1709{
1710 u16 ctrl_addr;
1711 uint interrupt;
1712
1713 ctrl_addr = ((table << TABLE_SEL_SHIFT) << 8) | addr;
1714
1715 interrupt = hw_block_intr(hw);
1716
1717 writel(data_hi, hw->io + KS884X_ACC_DATA_4_OFFSET);
1718 writel(data_lo, hw->io + KS884X_ACC_DATA_0_OFFSET);
1719
1720 writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
1721 HW_DELAY(hw, KS884X_IACR_OFFSET);
1722
1723 hw_restore_intr(hw, interrupt);
1724}
1725
1726/**
1727 * sw_w_sta_mac_table - write to the static MAC table
1728 * @hw: The hardware instance.
1729 * @addr: The address of the table entry.
1730 * @mac_addr: The MAC address.
1731 * @ports: The port members.
1732 * @override: The flag to override the port receive/transmit settings.
1733 * @valid: The flag to indicate entry is valid.
1734 * @use_fid: The flag to indicate the FID is valid.
1735 * @fid: The FID value.
1736 *
1737 * This routine writes an entry of the static MAC table of the switch. It
1738 * calls sw_w_table_64() to write the data.
1739 */
1740static void sw_w_sta_mac_table(struct ksz_hw *hw, u16 addr, u8 *mac_addr,
1741 u8 ports, int override, int valid, int use_fid, u8 fid)
1742{
1743 u32 data_hi;
1744 u32 data_lo;
1745
1746 data_lo = ((u32) mac_addr[2] << 24) |
1747 ((u32) mac_addr[3] << 16) |
1748 ((u32) mac_addr[4] << 8) | mac_addr[5];
1749 data_hi = ((u32) mac_addr[0] << 8) | mac_addr[1];
1750 data_hi |= (u32) ports << STATIC_MAC_FWD_PORTS_SHIFT;
1751
1752 if (override)
1753 data_hi |= STATIC_MAC_TABLE_OVERRIDE;
1754 if (use_fid) {
1755 data_hi |= STATIC_MAC_TABLE_USE_FID;
1756 data_hi |= (u32) fid << STATIC_MAC_FID_SHIFT;
1757 }
1758 if (valid)
1759 data_hi |= STATIC_MAC_TABLE_VALID;
1760
1761 sw_w_table_64(hw, TABLE_STATIC_MAC, addr, data_hi, data_lo);
1762}
1763
1764/**
1765 * sw_r_vlan_table - read from the VLAN table
1766 * @hw: The hardware instance.
1767 * @addr: The address of the table entry.
1768 * @vid: Buffer to store the VID.
1769 * @fid: Buffer to store the VID.
1770 * @member: Buffer to store the port membership.
1771 *
1772 * This function reads an entry of the VLAN table of the switch. It calls
1773 * sw_r_table() to get the data.
1774 *
1775 * Return 0 if the entry is valid; otherwise -1.
1776 */
1777static int sw_r_vlan_table(struct ksz_hw *hw, u16 addr, u16 *vid, u8 *fid,
1778 u8 *member)
1779{
1780 u32 data;
1781
1782 sw_r_table(hw, TABLE_VLAN, addr, &data);
1783 if (data & VLAN_TABLE_VALID) {
1784 *vid = (u16)(data & VLAN_TABLE_VID);
1785 *fid = (u8)((data & VLAN_TABLE_FID) >> VLAN_TABLE_FID_SHIFT);
1786 *member = (u8)((data & VLAN_TABLE_MEMBERSHIP) >>
1787 VLAN_TABLE_MEMBERSHIP_SHIFT);
1788 return 0;
1789 }
1790 return -1;
1791}
1792
/**
 * port_r_mib_cnt - read MIB counter
 * @hw: The hardware instance.
 * @port: The port index.
 * @addr: The address of the counter.
 * @cnt: Buffer the counter value is accumulated into.
 *
 * This routine reads a MIB counter of the port and adds it to *@cnt.
 * Hardware interrupts are disabled to minimize corruption of read data.
 */
static void port_r_mib_cnt(struct ksz_hw *hw, int port, u16 addr, u64 *cnt)
{
	u32 data;
	u16 ctrl_addr;
	uint interrupt;
	int timeout;

	ctrl_addr = addr + PORT_COUNTER_NUM * port;

	interrupt = hw_block_intr(hw);

	ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ) << 8);
	writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
	HW_DELAY(hw, KS884X_IACR_OFFSET);

	/*
	 * Poll up to 100 reads (with no delay between them) for the valid
	 * bit; *cnt is left untouched if it never appears.
	 */
	for (timeout = 100; timeout > 0; timeout--) {
		data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);

		if (data & MIB_COUNTER_VALID) {
			/* Overflow bit set: add one full wrap of the
			 * counter value field first.
			 */
			if (data & MIB_COUNTER_OVERFLOW)
				*cnt += MIB_COUNTER_VALUE + 1;
			*cnt += data & MIB_COUNTER_VALUE;
			break;
		}
	}

	hw_restore_intr(hw, interrupt);
}
1831
/**
 * port_r_mib_pkt - read dropped packet counts
 * @hw: The hardware instance.
 * @port: The port index.
 * @last: Array of previous raw register values, used to compute deltas;
 *	updated in place.
 * @cnt: Buffer to store the receive and transmit dropped packet counts.
 *
 * This routine reads the dropped packet counts of the port.
 * Hardware interrupts are disabled to minimize corruption of read data.
 */
static void port_r_mib_pkt(struct ksz_hw *hw, int port, u32 *last, u64 *cnt)
{
	u32 cur;
	u32 data;
	u16 ctrl_addr;
	uint interrupt;
	int index;

	/* First iteration reads the RX dropped count; the index then steps
	 * down to the TX dropped count for the second (final) iteration.
	 */
	index = KS_MIB_PACKET_DROPPED_RX_0 + port;
	do {
		interrupt = hw_block_intr(hw);

		ctrl_addr = (u16) index;
		ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ)
			<< 8);
		writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
		HW_DELAY(hw, KS884X_IACR_OFFSET);
		data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);

		hw_restore_intr(hw, interrupt);

		/* Accumulate the change since the last read, allowing for
		 * a single wrap of the hardware counter.
		 */
		data &= MIB_PACKET_DROPPED;
		cur = *last;
		if (data != cur) {
			*last = data;
			if (data < cur)
				data += MIB_PACKET_DROPPED + 1;
			data -= cur;
			*cnt += data;
		}
		++last;
		++cnt;
		index -= KS_MIB_PACKET_DROPPED_TX -
			KS_MIB_PACKET_DROPPED_TX_0 + 1;
	} while (index >= KS_MIB_PACKET_DROPPED_TX_0 + port);
}
1877
/**
 * port_r_cnt - read MIB counters periodically
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine is used to read the counters of the port periodically to avoid
 * counter overflow. The hardware should be acquired first before calling this
 * routine.
 *
 * Return 0.  (As written the function always reads all counters and always
 * returns 0; callers need not expect a non-zero "incomplete" result.)
 */
static int port_r_cnt(struct ksz_hw *hw, int port)
{
	struct ksz_port_mib *mib = &hw->port_mib[port];

	/* Resume from cnt_ptr so a partially-read set can be continued. */
	if (mib->mib_start < PORT_COUNTER_NUM)
		while (mib->cnt_ptr < PORT_COUNTER_NUM) {
			port_r_mib_cnt(hw, port, mib->cnt_ptr,
				&mib->counter[mib->cnt_ptr]);
			++mib->cnt_ptr;
		}
	/* Hardware with extra counters also tracks dropped packets. */
	if (hw->mib_cnt > PORT_COUNTER_NUM)
		port_r_mib_pkt(hw, port, mib->dropped,
			&mib->counter[PORT_COUNTER_NUM]);
	mib->cnt_ptr = 0;
	return 0;
}
1905
/**
 * port_init_cnt - initialize MIB counter values
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine is used to initialize all counters to zero if the hardware
 * cannot do it after reset.  It reads every counter once (consuming any
 * stale hardware values) and then zeroes the software copies.
 */
static void port_init_cnt(struct ksz_hw *hw, int port)
{
	struct ksz_port_mib *mib = &hw->port_mib[port];

	mib->cnt_ptr = 0;
	if (mib->mib_start < PORT_COUNTER_NUM)
		do {
			port_r_mib_cnt(hw, port, mib->cnt_ptr,
				&mib->counter[mib->cnt_ptr]);
			++mib->cnt_ptr;
		} while (mib->cnt_ptr < PORT_COUNTER_NUM);
	if (hw->mib_cnt > PORT_COUNTER_NUM)
		port_r_mib_pkt(hw, port, mib->dropped,
			&mib->counter[PORT_COUNTER_NUM]);
	/* Discard whatever was read; counting starts from zero. */
	memset((void *) mib->counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
	mib->cnt_ptr = 0;
}
1931
1932/*
1933 * Port functions
1934 */
1935
1936/**
1937 * port_chk - check port register bits
1938 * @hw: The hardware instance.
1939 * @port: The port index.
1940 * @offset: The offset of the port register.
1941 * @bits: The data bits to check.
1942 *
1943 * This function checks whether the specified bits of the port register are set
1944 * or not.
1945 *
1946 * Return 0 if the bits are not set.
1947 */
1948static int port_chk(struct ksz_hw *hw, int port, int offset, u16 bits)
1949{
1950 u32 addr;
1951 u16 data;
1952
1953 PORT_CTRL_ADDR(port, addr);
1954 addr += offset;
1955 data = readw(hw->io + addr);
1956 return (data & bits) == bits;
1957}
1958
1959/**
1960 * port_cfg - set port register bits
1961 * @hw: The hardware instance.
1962 * @port: The port index.
1963 * @offset: The offset of the port register.
1964 * @bits: The data bits to set.
1965 * @set: The flag indicating whether the bits are to be set or not.
1966 *
1967 * This routine sets or resets the specified bits of the port register.
1968 */
1969static void port_cfg(struct ksz_hw *hw, int port, int offset, u16 bits,
1970 int set)
1971{
1972 u32 addr;
1973 u16 data;
1974
1975 PORT_CTRL_ADDR(port, addr);
1976 addr += offset;
1977 data = readw(hw->io + addr);
1978 if (set)
1979 data |= bits;
1980 else
1981 data &= ~bits;
1982 writew(data, hw->io + addr);
1983}
1984
/**
 * port_chk_shift - check port bit
 * @hw: The hardware instance.
 * @port: The port index.
 * @addr: The address of the register.
 * @shift: Number of bits to shift.
 *
 * This function checks whether the specified port is set in the register or
 * not.
 *
 * Return 0 if the port is not set.
 */
static int port_chk_shift(struct ksz_hw *hw, int port, u32 addr, int shift)
{
	u16 data;
	u16 bit = 1 << port;

	data = readw(hw->io + addr);
	data >>= shift;
	return (data & bit) == bit;
}
2006
/**
 * port_cfg_shift - set port bit
 * @hw: The hardware instance.
 * @port: The port index.
 * @addr: The address of the register.
 * @shift: Number of bits to shift.
 * @set: The flag indicating whether the port is to be set or not.
 *
 * This routine sets or resets the specified port in the register.
 */
static void port_cfg_shift(struct ksz_hw *hw, int port, u32 addr, int shift,
	int set)
{
	u16 data;
	u16 bits = 1 << port;

	data = readw(hw->io + addr);
	bits <<= shift;
	if (set)
		data |= bits;
	else
		data &= ~bits;
	writew(data, hw->io + addr);
}
2031
/**
 * port_r8 - read byte from port register
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @data: Buffer to store the data.
 *
 * This routine reads a byte from the port register.  The register address
 * is the per-port base (PORT_CTRL_ADDR) plus @offset.
 */
static void port_r8(struct ksz_hw *hw, int port, int offset, u8 *data)
{
	u32 addr;

	PORT_CTRL_ADDR(port, addr);
	addr += offset;
	*data = readb(hw->io + addr);
}

/**
 * port_r16 - read word from port register.
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @data: Buffer to store the data.
 *
 * This routine reads a word from the port register.  The register address
 * is the per-port base (PORT_CTRL_ADDR) plus @offset.
 */
static void port_r16(struct ksz_hw *hw, int port, int offset, u16 *data)
{
	u32 addr;

	PORT_CTRL_ADDR(port, addr);
	addr += offset;
	*data = readw(hw->io + addr);
}

/**
 * port_w16 - write word to port register.
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @data: Data to write.
 *
 * This routine writes a word to the port register.  The register address
 * is the per-port base (PORT_CTRL_ADDR) plus @offset.
 */
static void port_w16(struct ksz_hw *hw, int port, int offset, u16 data)
{
	u32 addr;

	PORT_CTRL_ADDR(port, addr);
	addr += offset;
	writew(data, hw->io + addr);
}
2085
2086/**
2087 * sw_chk - check switch register bits
2088 * @hw: The hardware instance.
2089 * @addr: The address of the switch register.
2090 * @bits: The data bits to check.
2091 *
2092 * This function checks whether the specified bits of the switch register are
2093 * set or not.
2094 *
2095 * Return 0 if the bits are not set.
2096 */
2097static int sw_chk(struct ksz_hw *hw, u32 addr, u16 bits)
2098{
2099 u16 data;
2100
2101 data = readw(hw->io + addr);
2102 return (data & bits) == bits;
2103}
2104
2105/**
2106 * sw_cfg - set switch register bits
2107 * @hw: The hardware instance.
2108 * @addr: The address of the switch register.
2109 * @bits: The data bits to set.
2110 * @set: The flag indicating whether the bits are to be set or not.
2111 *
2112 * This function sets or resets the specified bits of the switch register.
2113 */
2114static void sw_cfg(struct ksz_hw *hw, u32 addr, u16 bits, int set)
2115{
2116 u16 data;
2117
2118 data = readw(hw->io + addr);
2119 if (set)
2120 data |= bits;
2121 else
2122 data &= ~bits;
2123 writew(data, hw->io + addr);
2124}
2125
/* Bandwidth */

/* Enable/disable broadcast storm protection (PORT_BROADCAST_STORM) on @p. */
static inline void port_cfg_broad_storm(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM, set);
}

/* Report whether broadcast storm protection is enabled on port @p. */
static inline int port_chk_broad_storm(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM);
}

/* Driver set switch broadcast storm protection at 10% rate. */
#define BROADCAST_STORM_PROTECTION_RATE 10

/* 148,800 frames * 67 ms / 100; hardware rate units for 100% threshold. */
#define BROADCAST_STORM_VALUE 9969
2145
2146/**
2147 * sw_cfg_broad_storm - configure broadcast storm threshold
2148 * @hw: The hardware instance.
2149 * @percent: Broadcast storm threshold in percent of transmit rate.
2150 *
2151 * This routine configures the broadcast storm threshold of the switch.
2152 */
2153static void sw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
2154{
2155 u16 data;
2156 u32 value = ((u32) BROADCAST_STORM_VALUE * (u32) percent / 100);
2157
2158 if (value > BROADCAST_STORM_RATE)
2159 value = BROADCAST_STORM_RATE;
2160
2161 data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
2162 data &= ~(BROADCAST_STORM_RATE_LO | BROADCAST_STORM_RATE_HI);
2163 data |= ((value & 0x00FF) << 8) | ((value & 0xFF00) >> 8);
2164 writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
2165}
2166
/**
 * sw_get_broad_storm - get broadcast storm threshold
 * @hw: The hardware instance.
 * @percent: Buffer to store the broadcast storm threshold percentage.
 *
 * This routine retrieves the broadcast storm threshold of the switch.
 */
static void sw_get_broad_storm(struct ksz_hw *hw, u8 *percent)
{
	int num;
	u16 data;

	data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
	/* Reassemble the byte-swapped rate value (see sw_cfg_broad_storm). */
	num = (data & BROADCAST_STORM_RATE_HI);
	num <<= 8;
	num |= (data & BROADCAST_STORM_RATE_LO) >> 8;
	/* Convert back to a percentage, rounding to nearest. */
	num = (num * 100 + BROADCAST_STORM_VALUE / 2) / BROADCAST_STORM_VALUE;
	*percent = (u8) num;
}
2186
/**
 * sw_dis_broad_storm - disable broadcast storm protection
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the broadcast storm limit function of the switch.
 */
static void sw_dis_broad_storm(struct ksz_hw *hw, int port)
{
	port_cfg_broad_storm(hw, port, 0);
}

/**
 * sw_ena_broad_storm - enable broadcast storm protection
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine reprograms the threshold from the cached broad_per value
 * and then enables the broadcast storm limit function on the port.
 */
static void sw_ena_broad_storm(struct ksz_hw *hw, int port)
{
	sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
	port_cfg_broad_storm(hw, port, 1);
}
2211
2212/**
2213 * sw_init_broad_storm - initialize broadcast storm
2214 * @hw: The hardware instance.
2215 *
2216 * This routine initializes the broadcast storm limit function of the switch.
2217 */
2218static void sw_init_broad_storm(struct ksz_hw *hw)
2219{
2220 int port;
2221
2222 hw->ksz_switch->broad_per = 1;
2223 sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
2224 for (port = 0; port < TOTAL_PORT_NUM; port++)
2225 sw_dis_broad_storm(hw, port);
2226 sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, MULTICAST_STORM_DISABLE, 1);
2227}
2228
/**
 * hw_cfg_broad_storm - configure broadcast storm
 * @hw: The hardware instance.
 * @percent: Broadcast storm threshold in percent of transmit rate.
 *
 * This routine configures the broadcast storm threshold of the switch.
 * It is called by user functions. The hardware should be acquired first.
 */
static void hw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
{
	if (percent > 100)
		percent = 100;

	sw_cfg_broad_storm(hw, percent);
	/* Read back the value actually programmed (hardware granularity may
	 * differ from the request) and cache it.
	 */
	sw_get_broad_storm(hw, &percent);
	hw->ksz_switch->broad_per = percent;
}
2246
2247/**
2248 * sw_dis_prio_rate - disable switch priority rate
2249 * @hw: The hardware instance.
2250 * @port: The port index.
2251 *
2252 * This routine disables the priority rate function of the switch.
2253 */
2254static void sw_dis_prio_rate(struct ksz_hw *hw, int port)
2255{
2256 u32 addr;
2257
2258 PORT_CTRL_ADDR(port, addr);
2259 addr += KS8842_PORT_IN_RATE_OFFSET;
2260 writel(0, hw->io + addr);
2261}
2262
2263/**
2264 * sw_init_prio_rate - initialize switch prioirty rate
2265 * @hw: The hardware instance.
2266 *
2267 * This routine initializes the priority rate function of the switch.
2268 */
2269static void sw_init_prio_rate(struct ksz_hw *hw)
2270{
2271 int port;
2272 int prio;
2273 struct ksz_switch *sw = hw->ksz_switch;
2274
2275 for (port = 0; port < TOTAL_PORT_NUM; port++) {
2276 for (prio = 0; prio < PRIO_QUEUES; prio++) {
2277 sw->port_cfg[port].rx_rate[prio] =
2278 sw->port_cfg[port].tx_rate[prio] = 0;
2279 }
2280 sw_dis_prio_rate(hw, port);
2281 }
2282}
2283
/* Communication */

/* Set/clear PORT_BACK_PRESSURE (half-duplex back pressure) on port @p. */
static inline void port_cfg_back_pressure(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE, set);
}

/* Set/clear PORT_FORCE_FLOW_CTRL on port @p. */
static inline void port_cfg_force_flow_ctrl(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL, set);
}

/* Report whether PORT_BACK_PRESSURE is set on port @p. */
static inline int port_chk_back_pressure(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE);
}

/* Report whether PORT_FORCE_FLOW_CTRL is set on port @p. */
static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL);
}
2309
/* Spanning Tree */

/* Set/clear PORT_LEARN_DISABLE (address learning off) on port @p. */
static inline void port_cfg_dis_learn(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_LEARN_DISABLE, set);
}

/* Set/clear PORT_RX_ENABLE on port @p. */
static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_RX_ENABLE, set);
}

/* Set/clear PORT_TX_ENABLE on port @p. */
static inline void port_cfg_tx(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_TX_ENABLE, set);
}

/* Set/clear the switch-wide SWITCH_FAST_AGING control bit. */
static inline void sw_cfg_fast_aging(struct ksz_hw *hw, int set)
{
	sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET, SWITCH_FAST_AGING, set);
}

/*
 * Flush the dynamic MAC table by pulsing fast aging on for 1 ms.
 * Skipped when the FAST_AGING override is set in hw->overrides.
 */
static inline void sw_flush_dyn_mac_table(struct ksz_hw *hw)
{
	if (!(hw->overrides & FAST_AGING)) {
		sw_cfg_fast_aging(hw, 1);
		mdelay(1);
		sw_cfg_fast_aging(hw, 0);
	}
}
2343
2344/* VLAN */
2345
2346static inline void port_cfg_ins_tag(struct ksz_hw *hw, int p, int insert)
2347{
2348 port_cfg(hw, p,
2349 KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG, insert);
2350}
2351
2352static inline void port_cfg_rmv_tag(struct ksz_hw *hw, int p, int remove)
2353{
2354 port_cfg(hw, p,
2355 KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG, remove);
2356}
2357
2358static inline int port_chk_ins_tag(struct ksz_hw *hw, int p)
2359{
2360 return port_chk(hw, p,
2361 KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG);
2362}
2363
2364static inline int port_chk_rmv_tag(struct ksz_hw *hw, int p)
2365{
2366 return port_chk(hw, p,
2367 KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG);
2368}
2369
/* Enable/disable discarding of packets without a VID on port @p. */
static inline void port_cfg_dis_non_vid(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID, set);
}
2375
/* Enable/disable ingress VLAN filtering on port @p. */
static inline void port_cfg_in_filter(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER, set);
}
2381
/* Return non-zero if non-VID packet discarding is enabled on port @p. */
static inline int port_chk_dis_non_vid(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID);
}
2387
/* Return non-zero if ingress VLAN filtering is enabled on port @p. */
static inline int port_chk_in_filter(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER);
}
2393
2394/* Mirroring */
2395
/* Select/deselect port @p as the mirror sniffer (monitoring) port. */
static inline void port_cfg_mirror_sniffer(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_SNIFFER, set);
}
2401
/* Enable/disable mirroring of received frames on port @p. */
static inline void port_cfg_mirror_rx(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_RX, set);
}
2407
/* Enable/disable mirroring of transmitted frames on port @p. */
static inline void port_cfg_mirror_tx(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_TX, set);
}
2413
/* Configure whether mirroring requires both RX and TX match (AND mode). */
static inline void sw_cfg_mirror_rx_tx(struct ksz_hw *hw, int set)
{
	sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, SWITCH_MIRROR_RX_TX, set);
}
2418
/* Disable all port mirroring functions on every switch port. */
static void sw_init_mirror(struct ksz_hw *hw)
{
	int port;

	for (port = 0; port < TOTAL_PORT_NUM; port++) {
		port_cfg_mirror_sniffer(hw, port, 0);
		port_cfg_mirror_rx(hw, port, 0);
		port_cfg_mirror_tx(hw, port, 0);
	}
	sw_cfg_mirror_rx_tx(hw, 0);
}
2430
/* Enable/disable forwarding of unknown unicast to the default port(s). */
static inline void sw_cfg_unk_def_deliver(struct ksz_hw *hw, int set)
{
	sw_cfg(hw, KS8842_SWITCH_CTRL_7_OFFSET,
		SWITCH_UNK_DEF_PORT_ENABLE, set);
}
2436
/* Return non-zero if unknown-unicast default-port forwarding is enabled. */
static inline int sw_cfg_chk_unk_def_deliver(struct ksz_hw *hw)
{
	return sw_chk(hw, KS8842_SWITCH_CTRL_7_OFFSET,
		SWITCH_UNK_DEF_PORT_ENABLE);
}
2442
/* Add/remove @port from the unknown-unicast default port membership. */
static inline void sw_cfg_unk_def_port(struct ksz_hw *hw, int port, int set)
{
	port_cfg_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0, set);
}
2447
/* Return non-zero if @port is in the unknown-unicast default membership. */
static inline int sw_chk_unk_def_port(struct ksz_hw *hw, int port)
{
	return port_chk_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0);
}
2452
2453/* Priority */
2454
/* Enable/disable DiffServ (TOS-based) priority classification on port @p. */
static inline void port_cfg_diffserv(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE, set);
}
2460
/* Enable/disable 802.1p tag-based priority classification on port @p. */
static inline void port_cfg_802_1p(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE, set);
}
2466
/* Enable/disable the user-priority-ceiling (priority replace) on port @p. */
static inline void port_cfg_replace_vid(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING, set);
}
2472
/* Enable/disable multiple priority transmit queues on port @p. */
static inline void port_cfg_prio(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE, set);
}
2478
/* Return non-zero if DiffServ priority is enabled on port @p. */
static inline int port_chk_diffserv(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE);
}
2484
/* Return non-zero if 802.1p priority is enabled on port @p. */
static inline int port_chk_802_1p(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE);
}
2490
/* Return non-zero if the user-priority-ceiling is enabled on port @p. */
static inline int port_chk_replace_vid(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING);
}
2496
/* Return non-zero if multiple priority queues are enabled on port @p. */
static inline int port_chk_prio(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE);
}
2502
2503/**
2504 * sw_dis_diffserv - disable switch DiffServ priority
2505 * @hw: The hardware instance.
2506 * @port: The port index.
2507 *
2508 * This routine disables the DiffServ priority function of the switch.
2509 */
2510static void sw_dis_diffserv(struct ksz_hw *hw, int port)
2511{
2512 port_cfg_diffserv(hw, port, 0);
2513}
2514
2515/**
2516 * sw_dis_802_1p - disable switch 802.1p priority
2517 * @hw: The hardware instance.
2518 * @port: The port index.
2519 *
2520 * This routine disables the 802.1p priority function of the switch.
2521 */
2522static void sw_dis_802_1p(struct ksz_hw *hw, int port)
2523{
2524 port_cfg_802_1p(hw, port, 0);
2525}
2526
2527/**
2528 * sw_cfg_replace_null_vid -
2529 * @hw: The hardware instance.
2530 * @set: The flag to disable or enable.
2531 *
2532 */
2533static void sw_cfg_replace_null_vid(struct ksz_hw *hw, int set)
2534{
2535 sw_cfg(hw, KS8842_SWITCH_CTRL_3_OFFSET, SWITCH_REPLACE_NULL_VID, set);
2536}
2537
2538/**
2539 * sw_cfg_replace_vid - enable switch 802.10 priority re-mapping
2540 * @hw: The hardware instance.
2541 * @port: The port index.
2542 * @set: The flag to disable or enable.
2543 *
2544 * This routine enables the 802.1p priority re-mapping function of the switch.
2545 * That allows 802.1p priority field to be replaced with the port's default
2546 * tag's priority value if the ingress packet's 802.1p priority has a higher
2547 * priority than port's default tag's priority.
2548 */
2549static void sw_cfg_replace_vid(struct ksz_hw *hw, int port, int set)
2550{
2551 port_cfg_replace_vid(hw, port, set);
2552}
2553
2554/**
2555 * sw_cfg_port_based - configure switch port based priority
2556 * @hw: The hardware instance.
2557 * @port: The port index.
2558 * @prio: The priority to set.
2559 *
2560 * This routine configures the port based priority of the switch.
2561 */
2562static void sw_cfg_port_based(struct ksz_hw *hw, int port, u8 prio)
2563{
2564 u16 data;
2565
2566 if (prio > PORT_BASED_PRIORITY_BASE)
2567 prio = PORT_BASED_PRIORITY_BASE;
2568
2569 hw->ksz_switch->port_cfg[port].port_prio = prio;
2570
2571 port_r16(hw, port, KS8842_PORT_CTRL_1_OFFSET, &data);
2572 data &= ~PORT_BASED_PRIORITY_MASK;
2573 data |= prio << PORT_BASED_PRIORITY_SHIFT;
2574 port_w16(hw, port, KS8842_PORT_CTRL_1_OFFSET, data);
2575}
2576
2577/**
2578 * sw_dis_multi_queue - disable transmit multiple queues
2579 * @hw: The hardware instance.
2580 * @port: The port index.
2581 *
2582 * This routine disables the transmit multiple queues selection of the switch
2583 * port. Only single transmit queue on the port.
2584 */
2585static void sw_dis_multi_queue(struct ksz_hw *hw, int port)
2586{
2587 port_cfg_prio(hw, port, 0);
2588}
2589
2590/**
2591 * sw_init_prio - initialize switch priority
2592 * @hw: The hardware instance.
2593 *
2594 * This routine initializes the switch QoS priority functions.
2595 */
2596static void sw_init_prio(struct ksz_hw *hw)
2597{
2598 int port;
2599 int tos;
2600 struct ksz_switch *sw = hw->ksz_switch;
2601
2602 /*
2603 * Init all the 802.1p tag priority value to be assigned to different
2604 * priority queue.
2605 */
2606 sw->p_802_1p[0] = 0;
2607 sw->p_802_1p[1] = 0;
2608 sw->p_802_1p[2] = 1;
2609 sw->p_802_1p[3] = 1;
2610 sw->p_802_1p[4] = 2;
2611 sw->p_802_1p[5] = 2;
2612 sw->p_802_1p[6] = 3;
2613 sw->p_802_1p[7] = 3;
2614
2615 /*
2616 * Init all the DiffServ priority value to be assigned to priority
2617 * queue 0.
2618 */
2619 for (tos = 0; tos < DIFFSERV_ENTRIES; tos++)
2620 sw->diffserv[tos] = 0;
2621
2622 /* All QoS functions disabled. */
2623 for (port = 0; port < TOTAL_PORT_NUM; port++) {
2624 sw_dis_multi_queue(hw, port);
2625 sw_dis_diffserv(hw, port);
2626 sw_dis_802_1p(hw, port);
2627 sw_cfg_replace_vid(hw, port, 0);
2628
2629 sw->port_cfg[port].port_prio = 0;
2630 sw_cfg_port_based(hw, port, sw->port_cfg[port].port_prio);
2631 }
2632 sw_cfg_replace_null_vid(hw, 0);
2633}
2634
2635/**
2636 * port_get_def_vid - get port default VID.
2637 * @hw: The hardware instance.
2638 * @port: The port index.
2639 * @vid: Buffer to store the VID.
2640 *
2641 * This routine retrieves the default VID of the port.
2642 */
2643static void port_get_def_vid(struct ksz_hw *hw, int port, u16 *vid)
2644{
2645 u32 addr;
2646
2647 PORT_CTRL_ADDR(port, addr);
2648 addr += KS8842_PORT_CTRL_VID_OFFSET;
2649 *vid = readw(hw->io + addr);
2650}
2651
2652/**
2653 * sw_init_vlan - initialize switch VLAN
2654 * @hw: The hardware instance.
2655 *
2656 * This routine initializes the VLAN function of the switch.
2657 */
2658static void sw_init_vlan(struct ksz_hw *hw)
2659{
2660 int port;
2661 int entry;
2662 struct ksz_switch *sw = hw->ksz_switch;
2663
2664 /* Read 16 VLAN entries from device's VLAN table. */
2665 for (entry = 0; entry < VLAN_TABLE_ENTRIES; entry++) {
2666 sw_r_vlan_table(hw, entry,
2667 &sw->vlan_table[entry].vid,
2668 &sw->vlan_table[entry].fid,
2669 &sw->vlan_table[entry].member);
2670 }
2671
2672 for (port = 0; port < TOTAL_PORT_NUM; port++) {
2673 port_get_def_vid(hw, port, &sw->port_cfg[port].vid);
2674 sw->port_cfg[port].member = PORT_MASK;
2675 }
2676}
2677
2678/**
2679 * sw_cfg_port_base_vlan - configure port-based VLAN membership
2680 * @hw: The hardware instance.
2681 * @port: The port index.
2682 * @member: The port-based VLAN membership.
2683 *
2684 * This routine configures the port-based VLAN membership of the port.
2685 */
2686static void sw_cfg_port_base_vlan(struct ksz_hw *hw, int port, u8 member)
2687{
2688 u32 addr;
2689 u8 data;
2690
2691 PORT_CTRL_ADDR(port, addr);
2692 addr += KS8842_PORT_CTRL_2_OFFSET;
2693
2694 data = readb(hw->io + addr);
2695 data &= ~PORT_VLAN_MEMBERSHIP;
2696 data |= (member & PORT_MASK);
2697 writeb(data, hw->io + addr);
2698
2699 hw->ksz_switch->port_cfg[port].member = member;
2700}
2701
2702/**
2703 * sw_get_addr - get the switch MAC address.
2704 * @hw: The hardware instance.
2705 * @mac_addr: Buffer to store the MAC address.
2706 *
2707 * This function retrieves the MAC address of the switch.
2708 */
2709static inline void sw_get_addr(struct ksz_hw *hw, u8 *mac_addr)
2710{
2711 int i;
2712
2713 for (i = 0; i < 6; i += 2) {
2714 mac_addr[i] = readb(hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
2715 mac_addr[1 + i] = readb(hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
2716 }
2717}
2718
2719/**
2720 * sw_set_addr - configure switch MAC address
2721 * @hw: The hardware instance.
2722 * @mac_addr: The MAC address.
2723 *
2724 * This function configures the MAC address of the switch.
2725 */
2726static void sw_set_addr(struct ksz_hw *hw, u8 *mac_addr)
2727{
2728 int i;
2729
2730 for (i = 0; i < 6; i += 2) {
2731 writeb(mac_addr[i], hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
2732 writeb(mac_addr[1 + i], hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
2733 }
2734}
2735
2736/**
2737 * sw_set_global_ctrl - set switch global control
2738 * @hw: The hardware instance.
2739 *
2740 * This routine sets the global control of the switch function.
2741 */
2742static void sw_set_global_ctrl(struct ksz_hw *hw)
2743{
2744 u16 data;
2745
2746 /* Enable switch MII flow control. */
2747 data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
2748 data |= SWITCH_FLOW_CTRL;
2749 writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
2750
2751 data = readw(hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
2752
2753 /* Enable aggressive back off algorithm in half duplex mode. */
2754 data |= SWITCH_AGGR_BACKOFF;
2755
2756 /* Enable automatic fast aging when link changed detected. */
2757 data |= SWITCH_AGING_ENABLE;
2758 data |= SWITCH_LINK_AUTO_AGING;
2759
2760 if (hw->overrides & FAST_AGING)
2761 data |= SWITCH_FAST_AGING;
2762 else
2763 data &= ~SWITCH_FAST_AGING;
2764 writew(data, hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
2765
2766 data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
2767
2768 /* Enable no excessive collision drop. */
2769 data |= NO_EXC_COLLISION_DROP;
2770 writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
2771}
2772
/* Spanning tree port states programmed by port_set_stp_state(). */
enum {
	STP_STATE_DISABLED = 0,
	STP_STATE_LISTENING,
	STP_STATE_LEARNING,
	STP_STATE_FORWARDING,
	STP_STATE_BLOCKED,
	STP_STATE_SIMPLE
};
2781
2782/**
2783 * port_set_stp_state - configure port spanning tree state
2784 * @hw: The hardware instance.
2785 * @port: The port index.
2786 * @state: The spanning tree state.
2787 *
2788 * This routine configures the spanning tree state of the port.
2789 */
2790static void port_set_stp_state(struct ksz_hw *hw, int port, int state)
2791{
2792 u16 data;
2793
2794 port_r16(hw, port, KS8842_PORT_CTRL_2_OFFSET, &data);
2795 switch (state) {
2796 case STP_STATE_DISABLED:
2797 data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
2798 data |= PORT_LEARN_DISABLE;
2799 break;
2800 case STP_STATE_LISTENING:
2801/*
2802 * No need to turn on transmit because of port direct mode.
2803 * Turning on receive is required if static MAC table is not setup.
2804 */
2805 data &= ~PORT_TX_ENABLE;
2806 data |= PORT_RX_ENABLE;
2807 data |= PORT_LEARN_DISABLE;
2808 break;
2809 case STP_STATE_LEARNING:
2810 data &= ~PORT_TX_ENABLE;
2811 data |= PORT_RX_ENABLE;
2812 data &= ~PORT_LEARN_DISABLE;
2813 break;
2814 case STP_STATE_FORWARDING:
2815 data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
2816 data &= ~PORT_LEARN_DISABLE;
2817 break;
2818 case STP_STATE_BLOCKED:
2819/*
2820 * Need to setup static MAC table with override to keep receiving BPDU
2821 * messages. See sw_init_stp routine.
2822 */
2823 data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
2824 data |= PORT_LEARN_DISABLE;
2825 break;
2826 case STP_STATE_SIMPLE:
2827 data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
2828 data |= PORT_LEARN_DISABLE;
2829 break;
2830 }
2831 port_w16(hw, port, KS8842_PORT_CTRL_2_OFFSET, data);
2832 hw->ksz_switch->port_cfg[port].stp_state = state;
2833}
2834
2835#define STP_ENTRY 0
2836#define BROADCAST_ENTRY 1
2837#define BRIDGE_ADDR_ENTRY 2
2838#define IPV6_ADDR_ENTRY 3
2839
2840/**
2841 * sw_clr_sta_mac_table - clear static MAC table
2842 * @hw: The hardware instance.
2843 *
2844 * This routine clears the static MAC table.
2845 */
2846static void sw_clr_sta_mac_table(struct ksz_hw *hw)
2847{
2848 struct ksz_mac_table *entry;
2849 int i;
2850
2851 for (i = 0; i < STATIC_MAC_TABLE_ENTRIES; i++) {
2852 entry = &hw->ksz_switch->mac_table[i];
2853 sw_w_sta_mac_table(hw, i,
2854 entry->mac_addr, entry->ports,
2855 entry->override, 0,
2856 entry->use_fid, entry->fid);
2857 }
2858}
2859
2860/**
2861 * sw_init_stp - initialize switch spanning tree support
2862 * @hw: The hardware instance.
2863 *
2864 * This routine initializes the spanning tree support of the switch.
2865 */
2866static void sw_init_stp(struct ksz_hw *hw)
2867{
2868 struct ksz_mac_table *entry;
2869
2870 entry = &hw->ksz_switch->mac_table[STP_ENTRY];
2871 entry->mac_addr[0] = 0x01;
2872 entry->mac_addr[1] = 0x80;
2873 entry->mac_addr[2] = 0xC2;
2874 entry->mac_addr[3] = 0x00;
2875 entry->mac_addr[4] = 0x00;
2876 entry->mac_addr[5] = 0x00;
2877 entry->ports = HOST_MASK;
2878 entry->override = 1;
2879 entry->valid = 1;
2880 sw_w_sta_mac_table(hw, STP_ENTRY,
2881 entry->mac_addr, entry->ports,
2882 entry->override, entry->valid,
2883 entry->use_fid, entry->fid);
2884}
2885
2886/**
2887 * sw_block_addr - block certain packets from the host port
2888 * @hw: The hardware instance.
2889 *
2890 * This routine blocks certain packets from reaching to the host port.
2891 */
2892static void sw_block_addr(struct ksz_hw *hw)
2893{
2894 struct ksz_mac_table *entry;
2895 int i;
2896
2897 for (i = BROADCAST_ENTRY; i <= IPV6_ADDR_ENTRY; i++) {
2898 entry = &hw->ksz_switch->mac_table[i];
2899 entry->valid = 0;
2900 sw_w_sta_mac_table(hw, i,
2901 entry->mac_addr, entry->ports,
2902 entry->override, entry->valid,
2903 entry->use_fid, entry->fid);
2904 }
2905}
2906
2907#define PHY_LINK_SUPPORT \
2908 (PHY_AUTO_NEG_ASYM_PAUSE | \
2909 PHY_AUTO_NEG_SYM_PAUSE | \
2910 PHY_AUTO_NEG_100BT4 | \
2911 PHY_AUTO_NEG_100BTX_FD | \
2912 PHY_AUTO_NEG_100BTX | \
2913 PHY_AUTO_NEG_10BT_FD | \
2914 PHY_AUTO_NEG_10BT)
2915
/* Read the control register of the PHY at register base @phy. */
static inline void hw_r_phy_ctrl(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}
2920
/* Write the control register of the PHY at register base @phy. */
static inline void hw_w_phy_ctrl(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}
2925
/* Read the status register of the PHY at register base @phy. */
static inline void hw_r_phy_link_stat(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_STATUS_OFFSET);
}
2930
/* Read the auto-negotiation advertisement register of the PHY. */
static inline void hw_r_phy_auto_neg(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
}
2935
/* Write the auto-negotiation advertisement register of the PHY. */
static inline void hw_w_phy_auto_neg(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
}
2940
/* Read the link-partner (remote capability) register of the PHY. */
static inline void hw_r_phy_rem_cap(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_REMOTE_CAP_OFFSET);
}
2945
/* Read the crossover control bits (shares the PHY control register). */
static inline void hw_r_phy_crossover(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}
2950
/* Write the crossover control bits (shares the PHY control register). */
static inline void hw_w_phy_crossover(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}
2955
/* Read the polarity bits from the PHY special control register. */
static inline void hw_r_phy_polarity(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
}
2960
/* Write the polarity bits to the PHY special control register. */
static inline void hw_w_phy_polarity(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
}
2965
/* Read the LinkMD (cable diagnostics) register of the PHY. */
static inline void hw_r_phy_link_md(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
}
2970
/* Write the LinkMD (cable diagnostics) register of the PHY. */
static inline void hw_w_phy_link_md(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
}
2975
2976/**
2977 * hw_r_phy - read data from PHY register
2978 * @hw: The hardware instance.
2979 * @port: Port to read.
2980 * @reg: PHY register to read.
2981 * @val: Buffer to store the read data.
2982 *
2983 * This routine reads data from the PHY register.
2984 */
2985static void hw_r_phy(struct ksz_hw *hw, int port, u16 reg, u16 *val)
2986{
2987 int phy;
2988
2989 phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
2990 *val = readw(hw->io + phy);
2991}
2992
2993/**
2994 * port_w_phy - write data to PHY register
2995 * @hw: The hardware instance.
2996 * @port: Port to write.
2997 * @reg: PHY register to write.
2998 * @val: Word data to write.
2999 *
3000 * This routine writes data to the PHY register.
3001 */
3002static void hw_w_phy(struct ksz_hw *hw, int port, u16 reg, u16 val)
3003{
3004 int phy;
3005
3006 phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
3007 writew(val, hw->io + phy);
3008}
3009
3010/*
3011 * EEPROM access functions
3012 */
3013
3014#define AT93C_CODE 0
3015#define AT93C_WR_OFF 0x00
3016#define AT93C_WR_ALL 0x10
3017#define AT93C_ER_ALL 0x20
3018#define AT93C_WR_ON 0x30
3019
3020#define AT93C_WRITE 1
3021#define AT93C_READ 2
3022#define AT93C_ERASE 3
3023
3024#define EEPROM_DELAY 4
3025
/* Drive the given EEPROM control line(s) low. */
static inline void drop_gpio(struct ksz_hw *hw, u8 gpio)
{
	u16 data;

	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	data &= ~gpio;
	writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
}
3034
/* Drive the given EEPROM control line(s) high. */
static inline void raise_gpio(struct ksz_hw *hw, u8 gpio)
{
	u16 data;

	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	data |= gpio;
	writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
}
3043
/* Sample the given EEPROM control line(s); non-zero means high. */
static inline u8 state_gpio(struct ksz_hw *hw, u8 gpio)
{
	u16 data;

	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	return (u8)(data & gpio);
}
3051
/* Generate one serial clock pulse on the EEPROM clock line. */
static void eeprom_clk(struct ksz_hw *hw)
{
	raise_gpio(hw, EEPROM_SERIAL_CLOCK);
	udelay(EEPROM_DELAY);
	drop_gpio(hw, EEPROM_SERIAL_CLOCK);
	udelay(EEPROM_DELAY);
}
3059
/* Shift a 16-bit word in from the EEPROM, MSB first, clocking manually. */
static u16 spi_r(struct ksz_hw *hw)
{
	int i;
	u16 temp = 0;

	for (i = 15; i >= 0; i--) {
		raise_gpio(hw, EEPROM_SERIAL_CLOCK);
		udelay(EEPROM_DELAY);

		/* Sample the data-in line while the clock is high. */
		temp |= (state_gpio(hw, EEPROM_DATA_IN)) ? 1 << i : 0;

		drop_gpio(hw, EEPROM_SERIAL_CLOCK);
		udelay(EEPROM_DELAY);
	}
	return temp;
}
3076
3077static void spi_w(struct ksz_hw *hw, u16 data)
3078{
3079 int i;
3080
3081 for (i = 15; i >= 0; i--) {
3082 (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
3083 drop_gpio(hw, EEPROM_DATA_OUT);
3084 eeprom_clk(hw);
3085 }
3086}
3087
3088static void spi_reg(struct ksz_hw *hw, u8 data, u8 reg)
3089{
3090 int i;
3091
3092 /* Initial start bit */
3093 raise_gpio(hw, EEPROM_DATA_OUT);
3094 eeprom_clk(hw);
3095
3096 /* AT93C operation */
3097 for (i = 1; i >= 0; i--) {
3098 (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
3099 drop_gpio(hw, EEPROM_DATA_OUT);
3100 eeprom_clk(hw);
3101 }
3102
3103 /* Address location */
3104 for (i = 5; i >= 0; i--) {
3105 (reg & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
3106 drop_gpio(hw, EEPROM_DATA_OUT);
3107 eeprom_clk(hw);
3108 }
3109}
3110
3111#define EEPROM_DATA_RESERVED 0
3112#define EEPROM_DATA_MAC_ADDR_0 1
3113#define EEPROM_DATA_MAC_ADDR_1 2
3114#define EEPROM_DATA_MAC_ADDR_2 3
3115#define EEPROM_DATA_SUBSYS_ID 4
3116#define EEPROM_DATA_SUBSYS_VEN_ID 5
3117#define EEPROM_DATA_PM_CAP 6
3118
3119/* User defined EEPROM data */
3120#define EEPROM_DATA_OTHER_MAC_ADDR 9
3121
3122/**
3123 * eeprom_read - read from AT93C46 EEPROM
3124 * @hw: The hardware instance.
3125 * @reg: The register offset.
3126 *
3127 * This function reads a word from the AT93C46 EEPROM.
3128 *
3129 * Return the data value.
3130 */
3131static u16 eeprom_read(struct ksz_hw *hw, u8 reg)
3132{
3133 u16 data;
3134
3135 raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
3136
3137 spi_reg(hw, AT93C_READ, reg);
3138 data = spi_r(hw);
3139
3140 drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
3141
3142 return data;
3143}
3144
3145/**
3146 * eeprom_write - write to AT93C46 EEPROM
3147 * @hw: The hardware instance.
3148 * @reg: The register offset.
3149 * @data: The data value.
3150 *
3151 * This procedure writes a word to the AT93C46 EEPROM.
3152 */
3153static void eeprom_write(struct ksz_hw *hw, u8 reg, u16 data)
3154{
3155 int timeout;
3156
3157 raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
3158
3159 /* Enable write. */
3160 spi_reg(hw, AT93C_CODE, AT93C_WR_ON);
3161 drop_gpio(hw, EEPROM_CHIP_SELECT);
3162 udelay(1);
3163
3164 /* Erase the register. */
3165 raise_gpio(hw, EEPROM_CHIP_SELECT);
3166 spi_reg(hw, AT93C_ERASE, reg);
3167 drop_gpio(hw, EEPROM_CHIP_SELECT);
3168 udelay(1);
3169
3170 /* Check operation complete. */
3171 raise_gpio(hw, EEPROM_CHIP_SELECT);
3172 timeout = 8;
3173 mdelay(2);
3174 do {
3175 mdelay(1);
3176 } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
3177 drop_gpio(hw, EEPROM_CHIP_SELECT);
3178 udelay(1);
3179
3180 /* Write the register. */
3181 raise_gpio(hw, EEPROM_CHIP_SELECT);
3182 spi_reg(hw, AT93C_WRITE, reg);
3183 spi_w(hw, data);
3184 drop_gpio(hw, EEPROM_CHIP_SELECT);
3185 udelay(1);
3186
3187 /* Check operation complete. */
3188 raise_gpio(hw, EEPROM_CHIP_SELECT);
3189 timeout = 8;
3190 mdelay(2);
3191 do {
3192 mdelay(1);
3193 } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
3194 drop_gpio(hw, EEPROM_CHIP_SELECT);
3195 udelay(1);
3196
3197 /* Disable write. */
3198 raise_gpio(hw, EEPROM_CHIP_SELECT);
3199 spi_reg(hw, AT93C_CODE, AT93C_WR_OFF);
3200
3201 drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
3202}
3203
3204/*
3205 * Link detection routines
3206 */
3207
3208static u16 advertised_flow_ctrl(struct ksz_port *port, u16 ctrl)
3209{
3210 ctrl &= ~PORT_AUTO_NEG_SYM_PAUSE;
3211 switch (port->flow_ctrl) {
3212 case PHY_FLOW_CTRL:
3213 ctrl |= PORT_AUTO_NEG_SYM_PAUSE;
3214 break;
3215 /* Not supported. */
3216 case PHY_TX_ONLY:
3217 case PHY_RX_ONLY:
3218 default:
3219 break;
3220 }
3221 return ctrl;
3222}
3223
/*
 * Update the cached RX/TX DMA flow-control enable bits and, if the DMA
 * engine is already running, push any changed configuration to hardware.
 */
static void set_flow_ctrl(struct ksz_hw *hw, int rx, int tx)
{
	u32 rx_cfg;
	u32 tx_cfg;

	/* Remember old values to avoid redundant register writes. */
	rx_cfg = hw->rx_cfg;
	tx_cfg = hw->tx_cfg;
	if (rx)
		hw->rx_cfg |= DMA_RX_FLOW_ENABLE;
	else
		hw->rx_cfg &= ~DMA_RX_FLOW_ENABLE;
	if (tx)
		hw->tx_cfg |= DMA_TX_FLOW_ENABLE;
	else
		hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
	if (hw->enabled) {
		if (rx_cfg != hw->rx_cfg)
			writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
		if (tx_cfg != hw->tx_cfg)
			writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
	}
}
3246
/*
 * Resolve the RX/TX pause configuration from the local and remote
 * auto-negotiation pause capabilities (IEEE 802.3 pause resolution)
 * and apply it unless the PAUSE_FLOW_CTRL override forces pause frames
 * or a switch is in front of the MAC.
 */
static void determine_flow_ctrl(struct ksz_hw *hw, struct ksz_port *port,
	u16 local, u16 remote)
{
	int rx;
	int tx;

	if (hw->overrides & PAUSE_FLOW_CTRL)
		return;

	rx = tx = 0;
	if (port->force_link)
		rx = tx = 1;
	if (remote & PHY_AUTO_NEG_SYM_PAUSE) {
		if (local & PHY_AUTO_NEG_SYM_PAUSE) {
			rx = tx = 1;
		} else if ((remote & PHY_AUTO_NEG_ASYM_PAUSE) &&
				(local & PHY_AUTO_NEG_PAUSE) ==
				PHY_AUTO_NEG_ASYM_PAUSE) {
			tx = 1;
		}
	} else if (remote & PHY_AUTO_NEG_ASYM_PAUSE) {
		if ((local & PHY_AUTO_NEG_PAUSE) == PHY_AUTO_NEG_PAUSE)
			rx = 1;
	}
	if (!hw->ksz_switch)
		set_flow_ctrl(hw, rx, tx);
}
3274
/*
 * React to a port duplex change: on chips with the half-duplex signal
 * bug, transmit flow control must be turned off in half-duplex mode
 * (unless pause frames are forced via the PAUSE_FLOW_CTRL override).
 */
static inline void port_cfg_change(struct ksz_hw *hw, struct ksz_port *port,
	struct ksz_port_info *info, u16 link_status)
{
	if ((hw->features & HALF_DUPLEX_SIGNAL_BUG) &&
			!(hw->overrides & PAUSE_FLOW_CTRL)) {
		u32 cfg = hw->tx_cfg;

		/* Disable flow control in the half duplex mode. */
		if (1 == info->duplex)
			hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
		if (hw->enabled && cfg != hw->tx_cfg)
			writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
	}
}
3289
3290/**
3291 * port_get_link_speed - get current link status
3292 * @port: The port instance.
3293 *
3294 * This routine reads PHY registers to determine the current link status of the
3295 * switch ports.
3296 */
3297static void port_get_link_speed(struct ksz_port *port)
3298{
3299 uint interrupt;
3300 struct ksz_port_info *info;
3301 struct ksz_port_info *linked = NULL;
3302 struct ksz_hw *hw = port->hw;
3303 u16 data;
3304 u16 status;
3305 u8 local;
3306 u8 remote;
3307 int i;
3308 int p;
3309 int change = 0;
3310
3311 interrupt = hw_block_intr(hw);
3312
3313 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
3314 info = &hw->port_info[p];
3315 port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
3316 port_r16(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
3317
3318 /*
3319 * Link status is changing all the time even when there is no
3320 * cable connection!
3321 */
3322 remote = status & (PORT_AUTO_NEG_COMPLETE |
3323 PORT_STATUS_LINK_GOOD);
3324 local = (u8) data;
3325
3326 /* No change to status. */
3327 if (local == info->advertised && remote == info->partner)
3328 continue;
3329
3330 info->advertised = local;
3331 info->partner = remote;
3332 if (status & PORT_STATUS_LINK_GOOD) {
3333
3334 /* Remember the first linked port. */
3335 if (!linked)
3336 linked = info;
3337
3338 info->tx_rate = 10 * TX_RATE_UNIT;
3339 if (status & PORT_STATUS_SPEED_100MBIT)
3340 info->tx_rate = 100 * TX_RATE_UNIT;
3341
3342 info->duplex = 1;
3343 if (status & PORT_STATUS_FULL_DUPLEX)
3344 info->duplex = 2;
3345
3346 if (media_connected != info->state) {
3347 hw_r_phy(hw, p, KS884X_PHY_AUTO_NEG_OFFSET,
3348 &data);
3349 hw_r_phy(hw, p, KS884X_PHY_REMOTE_CAP_OFFSET,
3350 &status);
3351 determine_flow_ctrl(hw, port, data, status);
3352 if (hw->ksz_switch) {
3353 port_cfg_back_pressure(hw, p,
3354 (1 == info->duplex));
3355 }
3356 change |= 1 << i;
3357 port_cfg_change(hw, port, info, status);
3358 }
3359 info->state = media_connected;
3360 } else {
3361 if (media_disconnected != info->state) {
3362 change |= 1 << i;
3363
3364 /* Indicate the link just goes down. */
3365 hw->port_mib[p].link_down = 1;
3366 }
3367 info->state = media_disconnected;
3368 }
3369 hw->port_mib[p].state = (u8) info->state;
3370 }
3371
3372 if (linked && media_disconnected == port->linked->state)
3373 port->linked = linked;
3374
3375 hw_restore_intr(hw, interrupt);
3376}
3377
3378#define PHY_RESET_TIMEOUT 10
3379
3380/**
3381 * port_set_link_speed - set port speed
3382 * @port: The port instance.
3383 *
3384 * This routine sets the link speed of the switch ports.
3385 */
3386static void port_set_link_speed(struct ksz_port *port)
3387{
3388 struct ksz_port_info *info;
3389 struct ksz_hw *hw = port->hw;
3390 u16 data;
3391 u16 cfg;
3392 u8 status;
3393 int i;
3394 int p;
3395
3396 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
3397 info = &hw->port_info[p];
3398
3399 port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
3400 port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
3401
3402 cfg = 0;
3403 if (status & PORT_STATUS_LINK_GOOD)
3404 cfg = data;
3405
3406 data |= PORT_AUTO_NEG_ENABLE;
3407 data = advertised_flow_ctrl(port, data);
3408
3409 data |= PORT_AUTO_NEG_100BTX_FD | PORT_AUTO_NEG_100BTX |
3410 PORT_AUTO_NEG_10BT_FD | PORT_AUTO_NEG_10BT;
3411
3412 /* Check if manual configuration is specified by the user. */
3413 if (port->speed || port->duplex) {
3414 if (10 == port->speed)
3415 data &= ~(PORT_AUTO_NEG_100BTX_FD |
3416 PORT_AUTO_NEG_100BTX);
3417 else if (100 == port->speed)
3418 data &= ~(PORT_AUTO_NEG_10BT_FD |
3419 PORT_AUTO_NEG_10BT);
3420 if (1 == port->duplex)
3421 data &= ~(PORT_AUTO_NEG_100BTX_FD |
3422 PORT_AUTO_NEG_10BT_FD);
3423 else if (2 == port->duplex)
3424 data &= ~(PORT_AUTO_NEG_100BTX |
3425 PORT_AUTO_NEG_10BT);
3426 }
3427 if (data != cfg) {
3428 data |= PORT_AUTO_NEG_RESTART;
3429 port_w16(hw, p, KS884X_PORT_CTRL_4_OFFSET, data);
3430 }
3431 }
3432}
3433
3434/**
3435 * port_force_link_speed - force port speed
3436 * @port: The port instance.
3437 *
3438 * This routine forces the link speed of the switch ports.
3439 */
3440static void port_force_link_speed(struct ksz_port *port)
3441{
3442 struct ksz_hw *hw = port->hw;
3443 u16 data;
3444 int i;
3445 int phy;
3446 int p;
3447
3448 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
3449 phy = KS884X_PHY_1_CTRL_OFFSET + p * PHY_CTRL_INTERVAL;
3450 hw_r_phy_ctrl(hw, phy, &data);
3451
3452 data &= ~PHY_AUTO_NEG_ENABLE;
3453
3454 if (10 == port->speed)
3455 data &= ~PHY_SPEED_100MBIT;
3456 else if (100 == port->speed)
3457 data |= PHY_SPEED_100MBIT;
3458 if (1 == port->duplex)
3459 data &= ~PHY_FULL_DUPLEX;
3460 else if (2 == port->duplex)
3461 data |= PHY_FULL_DUPLEX;
3462 hw_w_phy_ctrl(hw, phy, data);
3463 }
3464}
3465
/* Power down (@enable != 0) or power up the PHYs of all ports in @port. */
static void port_set_power_saving(struct ksz_port *port, int enable)
{
	struct ksz_hw *hw = port->hw;
	int i;
	int p;

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++)
		port_cfg(hw, p,
			KS884X_PORT_CTRL_4_OFFSET, PORT_POWER_DOWN, enable);
}
3476
3477/*
3478 * KSZ8841 power management functions
3479 */
3480
3481/**
3482 * hw_chk_wol_pme_status - check PMEN pin
3483 * @hw: The hardware instance.
3484 *
3485 * This function is used to check PMEN pin is asserted.
3486 *
3487 * Return 1 if PMEN pin is asserted; otherwise, 0.
3488 */
3489static int hw_chk_wol_pme_status(struct ksz_hw *hw)
3490{
3491 struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
3492 struct pci_dev *pdev = hw_priv->pdev;
3493 u16 data;
3494
3495 if (!pdev->pm_cap)
3496 return 0;
3497 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
3498 return (data & PCI_PM_CTRL_PME_STATUS) == PCI_PM_CTRL_PME_STATUS;
3499}
3500
3501/**
3502 * hw_clr_wol_pme_status - clear PMEN pin
3503 * @hw: The hardware instance.
3504 *
3505 * This routine is used to clear PME_Status to deassert PMEN pin.
3506 */
3507static void hw_clr_wol_pme_status(struct ksz_hw *hw)
3508{
3509 struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
3510 struct pci_dev *pdev = hw_priv->pdev;
3511 u16 data;
3512
3513 if (!pdev->pm_cap)
3514 return;
3515
3516 /* Clear PME_Status to deassert PMEN pin. */
3517 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
3518 data |= PCI_PM_CTRL_PME_STATUS;
3519 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
3520}
3521
3522/**
3523 * hw_cfg_wol_pme - enable or disable Wake-on-LAN
3524 * @hw: The hardware instance.
3525 * @set: The flag indicating whether to enable or disable.
3526 *
3527 * This routine is used to enable or disable Wake-on-LAN.
3528 */
3529static void hw_cfg_wol_pme(struct ksz_hw *hw, int set)
3530{
3531 struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
3532 struct pci_dev *pdev = hw_priv->pdev;
3533 u16 data;
3534
3535 if (!pdev->pm_cap)
3536 return;
3537 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
3538 data &= ~PCI_PM_CTRL_STATE_MASK;
3539 if (set)
3540 data |= PCI_PM_CTRL_PME_ENABLE | PCI_D3hot;
3541 else
3542 data &= ~PCI_PM_CTRL_PME_ENABLE;
3543 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
3544}
3545
3546/**
3547 * hw_cfg_wol - configure Wake-on-LAN features
3548 * @hw: The hardware instance.
3549 * @frame: The pattern frame bit.
3550 * @set: The flag indicating whether to enable or disable.
3551 *
3552 * This routine is used to enable or disable certain Wake-on-LAN features.
3553 */
3554static void hw_cfg_wol(struct ksz_hw *hw, u16 frame, int set)
3555{
3556 u16 data;
3557
3558 data = readw(hw->io + KS8841_WOL_CTRL_OFFSET);
3559 if (set)
3560 data |= frame;
3561 else
3562 data &= ~frame;
3563 writew(data, hw->io + KS8841_WOL_CTRL_OFFSET);
3564}
3565
3566/**
3567 * hw_set_wol_frame - program Wake-on-LAN pattern
3568 * @hw: The hardware instance.
3569 * @i: The frame index.
3570 * @mask_size: The size of the mask.
3571 * @mask: Mask to ignore certain bytes in the pattern.
3572 * @frame_size: The size of the frame.
3573 * @pattern: The frame data.
3574 *
3575 * This routine is used to program Wake-on-LAN pattern.
3576 */
3577static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size,
3578 u8 *mask, uint frame_size, u8 *pattern)
3579{
3580 int bits;
3581 int from;
3582 int len;
3583 int to;
3584 u32 crc;
3585 u8 data[64];
3586 u8 val = 0;
3587
3588 if (frame_size > mask_size * 8)
3589 frame_size = mask_size * 8;
3590 if (frame_size > 64)
3591 frame_size = 64;
3592
3593 i *= 0x10;
3594 writel(0, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i);
3595 writel(0, hw->io + KS8841_WOL_FRAME_BYTE2_OFFSET + i);
3596
3597 bits = len = from = to = 0;
3598 do {
3599 if (bits) {
3600 if ((val & 1))
3601 data[to++] = pattern[from];
3602 val >>= 1;
3603 ++from;
3604 --bits;
3605 } else {
3606 val = mask[len];
3607 writeb(val, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i
3608 + len);
3609 ++len;
3610 if (val)
3611 bits = 8;
3612 else
3613 from += 8;
3614 }
3615 } while (from < (int) frame_size);
3616 if (val) {
3617 bits = mask[len - 1];
3618 val <<= (from % 8);
3619 bits &= ~val;
3620 writeb(bits, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i + len -
3621 1);
3622 }
3623 crc = ether_crc(to, data);
3624 writel(crc, hw->io + KS8841_WOL_FRAME_CRC_OFFSET + i);
3625}
3626
3627/**
3628 * hw_add_wol_arp - add ARP pattern
3629 * @hw: The hardware instance.
3630 * @ip_addr: The IPv4 address assigned to the device.
3631 *
3632 * This routine is used to add ARP pattern for waking up the host.
3633 */
3634static void hw_add_wol_arp(struct ksz_hw *hw, u8 *ip_addr)
3635{
3636 u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 };
3637 u8 pattern[42] = {
3638 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
3639 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3640 0x08, 0x06,
3641 0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x01,
3642 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3643 0x00, 0x00, 0x00, 0x00,
3644 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3645 0x00, 0x00, 0x00, 0x00 };
3646
3647 memcpy(&pattern[38], ip_addr, 4);
3648 hw_set_wol_frame(hw, 3, 6, mask, 42, pattern);
3649}
3650
3651/**
3652 * hw_add_wol_bcast - add broadcast pattern
3653 * @hw: The hardware instance.
3654 *
3655 * This routine is used to add broadcast pattern for waking up the host.
3656 */
3657static void hw_add_wol_bcast(struct ksz_hw *hw)
3658{
3659 u8 mask[] = { 0x3F };
3660 u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3661
3662 hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern);
3663}
3664
3665/**
3666 * hw_add_wol_mcast - add multicast pattern
3667 * @hw: The hardware instance.
3668 *
3669 * This routine is used to add multicast pattern for waking up the host.
3670 *
3671 * It is assumed the multicast packet is the ICMPv6 neighbor solicitation used
3672 * by IPv6 ping command. Note that multicast packets are filtred through the
3673 * multicast hash table, so not all multicast packets can wake up the host.
3674 */
3675static void hw_add_wol_mcast(struct ksz_hw *hw)
3676{
3677 u8 mask[] = { 0x3F };
3678 u8 pattern[] = { 0x33, 0x33, 0xFF, 0x00, 0x00, 0x00 };
3679
3680 memcpy(&pattern[3], &hw->override_addr[3], 3);
3681 hw_set_wol_frame(hw, 1, 1, mask, 6, pattern);
3682}
3683
3684/**
3685 * hw_add_wol_ucast - add unicast pattern
3686 * @hw: The hardware instance.
3687 *
3688 * This routine is used to add unicast pattern to wakeup the host.
3689 *
3690 * It is assumed the unicast packet is directed to the device, as the hardware
3691 * can only receive them in normal case.
3692 */
3693static void hw_add_wol_ucast(struct ksz_hw *hw)
3694{
3695 u8 mask[] = { 0x3F };
3696
3697 hw_set_wol_frame(hw, 0, 1, mask, MAC_ADDR_LEN, hw->override_addr);
3698}
3699
3700/**
3701 * hw_enable_wol - enable Wake-on-LAN
3702 * @hw: The hardware instance.
3703 * @wol_enable: The Wake-on-LAN settings.
3704 * @net_addr: The IPv4 address assigned to the device.
3705 *
3706 * This routine is used to enable Wake-on-LAN depending on driver settings.
3707 */
3708static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, u8 *net_addr)
3709{
3710 hw_cfg_wol(hw, KS8841_WOL_MAGIC_ENABLE, (wol_enable & WAKE_MAGIC));
3711 hw_cfg_wol(hw, KS8841_WOL_FRAME0_ENABLE, (wol_enable & WAKE_UCAST));
3712 hw_add_wol_ucast(hw);
3713 hw_cfg_wol(hw, KS8841_WOL_FRAME1_ENABLE, (wol_enable & WAKE_MCAST));
3714 hw_add_wol_mcast(hw);
3715 hw_cfg_wol(hw, KS8841_WOL_FRAME2_ENABLE, (wol_enable & WAKE_BCAST));
3716 hw_cfg_wol(hw, KS8841_WOL_FRAME3_ENABLE, (wol_enable & WAKE_ARP));
3717 hw_add_wol_arp(hw, net_addr);
3718}
3719
3720/**
3721 * hw_init - check driver is correct for the hardware
3722 * @hw: The hardware instance.
3723 *
3724 * This function checks the hardware is correct for this driver and sets the
3725 * hardware up for proper initialization.
3726 *
3727 * Return number of ports or 0 if not right.
3728 */
3729static int hw_init(struct ksz_hw *hw)
3730{
3731 int rc = 0;
3732 u16 data;
3733 u16 revision;
3734
3735 /* Set bus speed to 125MHz. */
3736 writew(BUS_SPEED_125_MHZ, hw->io + KS884X_BUS_CTRL_OFFSET);
3737
3738 /* Check KSZ884x chip ID. */
3739 data = readw(hw->io + KS884X_CHIP_ID_OFFSET);
3740
3741 revision = (data & KS884X_REVISION_MASK) >> KS884X_REVISION_SHIFT;
3742 data &= KS884X_CHIP_ID_MASK_41;
3743 if (REG_CHIP_ID_41 == data)
3744 rc = 1;
3745 else if (REG_CHIP_ID_42 == data)
3746 rc = 2;
3747 else
3748 return 0;
3749
3750 /* Setup hardware features or bug workarounds. */
3751 if (revision <= 1) {
3752 hw->features |= SMALL_PACKET_TX_BUG;
3753 if (1 == rc)
3754 hw->features |= HALF_DUPLEX_SIGNAL_BUG;
3755 }
3756 hw->features |= IPV6_CSUM_GEN_HACK;
3757 return rc;
3758}
3759
3760/**
3761 * hw_reset - reset the hardware
3762 * @hw: The hardware instance.
3763 *
3764 * This routine resets the hardware.
3765 */
3766static void hw_reset(struct ksz_hw *hw)
3767{
3768 writew(GLOBAL_SOFTWARE_RESET, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
3769
3770 /* Wait for device to reset. */
3771 mdelay(10);
3772
3773 /* Write 0 to clear device reset. */
3774 writew(0, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
3775}
3776
3777/**
3778 * hw_setup - setup the hardware
3779 * @hw: The hardware instance.
3780 *
3781 * This routine setup the hardware for proper operation.
3782 */
3783static void hw_setup(struct ksz_hw *hw)
3784{
3785#if SET_DEFAULT_LED
3786 u16 data;
3787
3788 /* Change default LED mode. */
3789 data = readw(hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
3790 data &= ~LED_MODE;
3791 data |= SET_DEFAULT_LED;
3792 writew(data, hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
3793#endif
3794
3795 /* Setup transmit control. */
3796 hw->tx_cfg = (DMA_TX_PAD_ENABLE | DMA_TX_CRC_ENABLE |
3797 (DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_TX_ENABLE);
3798
3799 /* Setup receive control. */
3800 hw->rx_cfg = (DMA_RX_BROADCAST | DMA_RX_UNICAST |
3801 (DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_RX_ENABLE);
3802 hw->rx_cfg |= KS884X_DMA_RX_MULTICAST;
3803
3804 /* Hardware cannot handle UDP packet in IP fragments. */
3805 hw->rx_cfg |= (DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);
3806
3807 if (hw->all_multi)
3808 hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
3809 if (hw->promiscuous)
3810 hw->rx_cfg |= DMA_RX_PROMISCUOUS;
3811}
3812
3813/**
3814 * hw_setup_intr - setup interrupt mask
3815 * @hw: The hardware instance.
3816 *
3817 * This routine setup the interrupt mask for proper operation.
3818 */
3819static void hw_setup_intr(struct ksz_hw *hw)
3820{
3821 hw->intr_mask = KS884X_INT_MASK | KS884X_INT_RX_OVERRUN;
3822}
3823
/**
 * ksz_check_desc_num - ensure a usable descriptor count
 * @info: Descriptor information structure.
 *
 * The descriptor count must be a power of two of at least
 * 1 << MIN_DESC_SHIFT.  If info->alloc does not qualify it is rounded up to
 * the next acceptable power of two and written back.  info->mask is then
 * valid for cheap ring-index wrapping (index & mask).
 */
static void ksz_check_desc_num(struct ksz_desc_info *info)
{
#define MIN_DESC_SHIFT 2

	int alloc = info->alloc;
	int shift;

	/* Strip trailing zero bits; a power of two reduces to exactly 1. */
	shift = 0;
	while (!(alloc & 1)) {
		shift++;
		alloc >>= 1;
	}
	if (alloc != 1 || shift < MIN_DESC_SHIFT) {
		printk(KERN_ALERT "Hardware descriptor numbers not right!\n");
		/* Count the remaining bits to round up past the highest one. */
		while (alloc) {
			shift++;
			alloc >>= 1;
		}
		if (shift < MIN_DESC_SHIFT)
			shift = MIN_DESC_SHIFT;
		alloc = 1 << shift;
		info->alloc = alloc;
	}
	info->mask = info->alloc - 1;
}
3849
/**
 * hw_init_desc - initialize a descriptor ring
 * @desc_info: Descriptor information structure.
 * @transmit: Indication that descriptors are for transmit (not used in the
 *	body; kept for symmetry with callers).
 *
 * Links each software descriptor to its hardware descriptor and chains the
 * hardware descriptors into a ring by physical address.
 */
static void hw_init_desc(struct ksz_desc_info *desc_info, int transmit)
{
	int i;
	u32 phys = desc_info->ring_phys;
	struct ksz_hw_desc *desc = desc_info->ring_virt;
	struct ksz_desc *cur = desc_info->ring;
	struct ksz_desc *previous = NULL;

	for (i = 0; i < desc_info->alloc; i++) {
		cur->phw = desc++;
		/* Each descriptor's next pointer is the following one. */
		phys += desc_info->size;
		previous = cur++;
		previous->phw->next = cpu_to_le32(phys);
	}
	/* Close the ring: last descriptor points back to the first. */
	previous->phw->next = cpu_to_le32(desc_info->ring_phys);
	previous->sw.buf.rx.end_of_ring = 1;
	previous->phw->buf.data = cpu_to_le32(previous->sw.buf.data);

	desc_info->avail = desc_info->alloc;
	desc_info->last = desc_info->next = 0;

	desc_info->cur = desc_info->ring;
}
3873
3874/**
3875 * hw_set_desc_base - set descriptor base addresses
3876 * @hw: The hardware instance.
3877 * @tx_addr: The transmit descriptor base.
3878 * @rx_addr: The receive descriptor base.
3879 *
3880 * This routine programs the descriptor base addresses after reset.
3881 */
3882static void hw_set_desc_base(struct ksz_hw *hw, u32 tx_addr, u32 rx_addr)
3883{
3884 /* Set base address of Tx/Rx descriptors. */
3885 writel(tx_addr, hw->io + KS_DMA_TX_ADDR);
3886 writel(rx_addr, hw->io + KS_DMA_RX_ADDR);
3887}
3888
3889static void hw_reset_pkts(struct ksz_desc_info *info)
3890{
3891 info->cur = info->ring;
3892 info->avail = info->alloc;
3893 info->last = info->next = 0;
3894}
3895
/* Kick the receive DMA to continue processing descriptors. */
static inline void hw_resume_rx(struct ksz_hw *hw)
{
	writel(DMA_START, hw->io + KS_DMA_RX_START);
}
3900
3901/**
3902 * hw_start_rx - start receiving
3903 * @hw: The hardware instance.
3904 *
3905 * This routine starts the receive function of the hardware.
3906 */
3907static void hw_start_rx(struct ksz_hw *hw)
3908{
3909 writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
3910
3911 /* Notify when the receive stops. */
3912 hw->intr_mask |= KS884X_INT_RX_STOPPED;
3913
3914 writel(DMA_START, hw->io + KS_DMA_RX_START);
3915 hw_ack_intr(hw, KS884X_INT_RX_STOPPED);
3916 hw->rx_stop++;
3917
3918 /* Variable overflows. */
3919 if (0 == hw->rx_stop)
3920 hw->rx_stop = 2;
3921}
3922
3923/*
3924 * hw_stop_rx - stop receiving
3925 * @hw: The hardware instance.
3926 *
3927 * This routine stops the receive function of the hardware.
3928 */
3929static void hw_stop_rx(struct ksz_hw *hw)
3930{
3931 hw->rx_stop = 0;
3932 hw_turn_off_intr(hw, KS884X_INT_RX_STOPPED);
3933 writel((hw->rx_cfg & ~DMA_RX_ENABLE), hw->io + KS_DMA_RX_CTRL);
3934}
3935
3936/**
3937 * hw_start_tx - start transmitting
3938 * @hw: The hardware instance.
3939 *
3940 * This routine starts the transmit function of the hardware.
3941 */
3942static void hw_start_tx(struct ksz_hw *hw)
3943{
3944 writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
3945}
3946
3947/**
3948 * hw_stop_tx - stop transmitting
3949 * @hw: The hardware instance.
3950 *
3951 * This routine stops the transmit function of the hardware.
3952 */
3953static void hw_stop_tx(struct ksz_hw *hw)
3954{
3955 writel((hw->tx_cfg & ~DMA_TX_ENABLE), hw->io + KS_DMA_TX_CTRL);
3956}
3957
3958/**
3959 * hw_disable - disable hardware
3960 * @hw: The hardware instance.
3961 *
3962 * This routine disables the hardware.
3963 */
3964static void hw_disable(struct ksz_hw *hw)
3965{
3966 hw_stop_rx(hw);
3967 hw_stop_tx(hw);
3968 hw->enabled = 0;
3969}
3970
3971/**
3972 * hw_enable - enable hardware
3973 * @hw: The hardware instance.
3974 *
3975 * This routine enables the hardware.
3976 */
3977static void hw_enable(struct ksz_hw *hw)
3978{
3979 hw_start_tx(hw);
3980 hw_start_rx(hw);
3981 hw->enabled = 1;
3982}
3983
3984/**
3985 * hw_alloc_pkt - allocate enough descriptors for transmission
3986 * @hw: The hardware instance.
3987 * @length: The length of the packet.
3988 * @physical: Number of descriptors required.
3989 *
3990 * This function allocates descriptors for transmission.
3991 *
3992 * Return 0 if not successful; 1 for buffer copy; or number of descriptors.
3993 */
3994static int hw_alloc_pkt(struct ksz_hw *hw, int length, int physical)
3995{
3996 /* Always leave one descriptor free. */
3997 if (hw->tx_desc_info.avail <= 1)
3998 return 0;
3999
4000 /* Allocate a descriptor for transmission and mark it current. */
4001 get_tx_pkt(&hw->tx_desc_info, &hw->tx_desc_info.cur);
4002 hw->tx_desc_info.cur->sw.buf.tx.first_seg = 1;
4003
4004 /* Keep track of number of transmit descriptors used so far. */
4005 ++hw->tx_int_cnt;
4006 hw->tx_size += length;
4007
4008 /* Cannot hold on too much data. */
4009 if (hw->tx_size >= MAX_TX_HELD_SIZE)
4010 hw->tx_int_cnt = hw->tx_int_mask + 1;
4011
4012 if (physical > hw->tx_desc_info.avail)
4013 return 1;
4014
4015 return hw->tx_desc_info.avail;
4016}
4017
4018/**
4019 * hw_send_pkt - mark packet for transmission
4020 * @hw: The hardware instance.
4021 *
4022 * This routine marks the packet for transmission in PCI version.
4023 */
4024static void hw_send_pkt(struct ksz_hw *hw)
4025{
4026 struct ksz_desc *cur = hw->tx_desc_info.cur;
4027
4028 cur->sw.buf.tx.last_seg = 1;
4029
4030 /* Interrupt only after specified number of descriptors used. */
4031 if (hw->tx_int_cnt > hw->tx_int_mask) {
4032 cur->sw.buf.tx.intr = 1;
4033 hw->tx_int_cnt = 0;
4034 hw->tx_size = 0;
4035 }
4036
4037 /* KSZ8842 supports port directed transmission. */
4038 cur->sw.buf.tx.dest_port = hw->dst_ports;
4039
4040 release_desc(cur);
4041
4042 writel(0, hw->io + KS_DMA_TX_START);
4043}
4044
4045static int empty_addr(u8 *addr)
4046{
4047 u32 *addr1 = (u32 *) addr;
4048 u16 *addr2 = (u16 *) &addr[4];
4049
4050 return 0 == *addr1 && 0 == *addr2;
4051}
4052
4053/**
4054 * hw_set_addr - set MAC address
4055 * @hw: The hardware instance.
4056 *
4057 * This routine programs the MAC address of the hardware when the address is
4058 * overrided.
4059 */
4060static void hw_set_addr(struct ksz_hw *hw)
4061{
4062 int i;
4063
4064 for (i = 0; i < MAC_ADDR_LEN; i++)
4065 writeb(hw->override_addr[MAC_ADDR_ORDER(i)],
4066 hw->io + KS884X_ADDR_0_OFFSET + i);
4067
4068 sw_set_addr(hw, hw->override_addr);
4069}
4070
4071/**
4072 * hw_read_addr - read MAC address
4073 * @hw: The hardware instance.
4074 *
4075 * This routine retrieves the MAC address of the hardware.
4076 */
4077static void hw_read_addr(struct ksz_hw *hw)
4078{
4079 int i;
4080
4081 for (i = 0; i < MAC_ADDR_LEN; i++)
4082 hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io +
4083 KS884X_ADDR_0_OFFSET + i);
4084
4085 if (!hw->mac_override) {
4086 memcpy(hw->override_addr, hw->perm_addr, MAC_ADDR_LEN);
4087 if (empty_addr(hw->override_addr)) {
4088 memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS,
4089 MAC_ADDR_LEN);
4090 memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS,
4091 MAC_ADDR_LEN);
4092 hw->override_addr[5] += hw->id;
4093 hw_set_addr(hw);
4094 }
4095 }
4096}
4097
4098static void hw_ena_add_addr(struct ksz_hw *hw, int index, u8 *mac_addr)
4099{
4100 int i;
4101 u32 mac_addr_lo;
4102 u32 mac_addr_hi;
4103
4104 mac_addr_hi = 0;
4105 for (i = 0; i < 2; i++) {
4106 mac_addr_hi <<= 8;
4107 mac_addr_hi |= mac_addr[i];
4108 }
4109 mac_addr_hi |= ADD_ADDR_ENABLE;
4110 mac_addr_lo = 0;
4111 for (i = 2; i < 6; i++) {
4112 mac_addr_lo <<= 8;
4113 mac_addr_lo |= mac_addr[i];
4114 }
4115 index *= ADD_ADDR_INCR;
4116
4117 writel(mac_addr_lo, hw->io + index + KS_ADD_ADDR_0_LO);
4118 writel(mac_addr_hi, hw->io + index + KS_ADD_ADDR_0_HI);
4119}
4120
4121static void hw_set_add_addr(struct ksz_hw *hw)
4122{
4123 int i;
4124
4125 for (i = 0; i < ADDITIONAL_ENTRIES; i++) {
4126 if (empty_addr(hw->address[i]))
4127 writel(0, hw->io + ADD_ADDR_INCR * i +
4128 KS_ADD_ADDR_0_HI);
4129 else
4130 hw_ena_add_addr(hw, i, hw->address[i]);
4131 }
4132}
4133
/**
 * hw_add_addr - add an additional MAC address filter entry
 * @hw: The hardware instance.
 * @mac_addr: The address to add.
 *
 * Adds @mac_addr to the hardware's additional-address table unless it is
 * the main address or already present.
 *
 * Return 0 if the address is present or was added; -1 if the table is full.
 */
static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
{
	int i;
	int j = ADDITIONAL_ENTRIES;

	if (!memcmp(hw->override_addr, mac_addr, MAC_ADDR_LEN))
		return 0;
	for (i = 0; i < hw->addr_list_size; i++) {
		if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN))
			return 0;
		/* Remember the first free slot while scanning. */
		if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
			j = i;
	}
	if (j < ADDITIONAL_ENTRIES) {
		memcpy(hw->address[j], mac_addr, MAC_ADDR_LEN);
		hw_ena_add_addr(hw, j, hw->address[j]);
		return 0;
	}
	return -1;
}
4154
4155static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
4156{
4157 int i;
4158
4159 for (i = 0; i < hw->addr_list_size; i++) {
4160 if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN)) {
4161 memset(hw->address[i], 0, MAC_ADDR_LEN);
4162 writel(0, hw->io + ADD_ADDR_INCR * i +
4163 KS_ADD_ADDR_0_HI);
4164 return 0;
4165 }
4166 }
4167 return -1;
4168}
4169
4170/**
4171 * hw_clr_multicast - clear multicast addresses
4172 * @hw: The hardware instance.
4173 *
4174 * This routine removes all multicast addresses set in the hardware.
4175 */
4176static void hw_clr_multicast(struct ksz_hw *hw)
4177{
4178 int i;
4179
4180 for (i = 0; i < HW_MULTICAST_SIZE; i++) {
4181 hw->multi_bits[i] = 0;
4182
4183 writeb(0, hw->io + KS884X_MULTICAST_0_OFFSET + i);
4184 }
4185}
4186
4187/**
4188 * hw_set_grp_addr - set multicast addresses
4189 * @hw: The hardware instance.
4190 *
4191 * This routine programs multicast addresses for the hardware to accept those
4192 * addresses.
4193 */
4194static void hw_set_grp_addr(struct ksz_hw *hw)
4195{
4196 int i;
4197 int index;
4198 int position;
4199 int value;
4200
4201 memset(hw->multi_bits, 0, sizeof(u8) * HW_MULTICAST_SIZE);
4202
4203 for (i = 0; i < hw->multi_list_size; i++) {
4204 position = (ether_crc(6, hw->multi_list[i]) >> 26) & 0x3f;
4205 index = position >> 3;
4206 value = 1 << (position & 7);
4207 hw->multi_bits[index] |= (u8) value;
4208 }
4209
4210 for (i = 0; i < HW_MULTICAST_SIZE; i++)
4211 writeb(hw->multi_bits[i], hw->io + KS884X_MULTICAST_0_OFFSET +
4212 i);
4213}
4214
4215/**
4216 * hw_set_multicast - enable or disable all multicast receiving
4217 * @hw: The hardware instance.
4218 * @multicast: To turn on or off the all multicast feature.
4219 *
4220 * This routine enables/disables the hardware to accept all multicast packets.
4221 */
4222static void hw_set_multicast(struct ksz_hw *hw, u8 multicast)
4223{
4224 /* Stop receiving for reconfiguration. */
4225 hw_stop_rx(hw);
4226
4227 if (multicast)
4228 hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
4229 else
4230 hw->rx_cfg &= ~DMA_RX_ALL_MULTICAST;
4231
4232 if (hw->enabled)
4233 hw_start_rx(hw);
4234}
4235
4236/**
4237 * hw_set_promiscuous - enable or disable promiscuous receiving
4238 * @hw: The hardware instance.
4239 * @prom: To turn on or off the promiscuous feature.
4240 *
4241 * This routine enables/disables the hardware to accept all packets.
4242 */
4243static void hw_set_promiscuous(struct ksz_hw *hw, u8 prom)
4244{
4245 /* Stop receiving for reconfiguration. */
4246 hw_stop_rx(hw);
4247
4248 if (prom)
4249 hw->rx_cfg |= DMA_RX_PROMISCUOUS;
4250 else
4251 hw->rx_cfg &= ~DMA_RX_PROMISCUOUS;
4252
4253 if (hw->enabled)
4254 hw_start_rx(hw);
4255}
4256
4257/**
4258 * sw_enable - enable the switch
4259 * @hw: The hardware instance.
4260 * @enable: The flag to enable or disable the switch
4261 *
4262 * This routine is used to enable/disable the switch in KSZ8842.
4263 */
4264static void sw_enable(struct ksz_hw *hw, int enable)
4265{
4266 int port;
4267
4268 for (port = 0; port < SWITCH_PORT_NUM; port++) {
4269 if (hw->dev_count > 1) {
4270 /* Set port-base vlan membership with host port. */
4271 sw_cfg_port_base_vlan(hw, port,
4272 HOST_MASK | (1 << port));
4273 port_set_stp_state(hw, port, STP_STATE_DISABLED);
4274 } else {
4275 sw_cfg_port_base_vlan(hw, port, PORT_MASK);
4276 port_set_stp_state(hw, port, STP_STATE_FORWARDING);
4277 }
4278 }
4279 if (hw->dev_count > 1)
4280 port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
4281 else
4282 port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_FORWARDING);
4283
4284 if (enable)
4285 enable = KS8842_START;
4286 writew(enable, hw->io + KS884X_CHIP_ID_OFFSET);
4287}
4288
4289/**
4290 * sw_setup - setup the switch
4291 * @hw: The hardware instance.
4292 *
4293 * This routine setup the hardware switch engine for default operation.
4294 */
4295static void sw_setup(struct ksz_hw *hw)
4296{
4297 int port;
4298
4299 sw_set_global_ctrl(hw);
4300
4301 /* Enable switch broadcast storm protection at 10% percent rate. */
4302 sw_init_broad_storm(hw);
4303 hw_cfg_broad_storm(hw, BROADCAST_STORM_PROTECTION_RATE);
4304 for (port = 0; port < SWITCH_PORT_NUM; port++)
4305 sw_ena_broad_storm(hw, port);
4306
4307 sw_init_prio(hw);
4308
4309 sw_init_mirror(hw);
4310
4311 sw_init_prio_rate(hw);
4312
4313 sw_init_vlan(hw);
4314
4315 if (hw->features & STP_SUPPORT)
4316 sw_init_stp(hw);
4317 if (!sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
4318 SWITCH_TX_FLOW_CTRL | SWITCH_RX_FLOW_CTRL))
4319 hw->overrides |= PAUSE_FLOW_CTRL;
4320 sw_enable(hw, 1);
4321}
4322
4323/**
4324 * ksz_start_timer - start kernel timer
4325 * @info: Kernel timer information.
4326 * @time: The time tick.
4327 *
4328 * This routine starts the kernel timer after the specified time tick.
4329 */
4330static void ksz_start_timer(struct ksz_timer_info *info, int time)
4331{
4332 info->cnt = 0;
4333 info->timer.expires = jiffies + time;
4334 add_timer(&info->timer);
4335
4336 /* infinity */
4337 info->max = -1;
4338}
4339
4340/**
4341 * ksz_stop_timer - stop kernel timer
4342 * @info: Kernel timer information.
4343 *
4344 * This routine stops the kernel timer.
4345 */
4346static void ksz_stop_timer(struct ksz_timer_info *info)
4347{
4348 if (info->max) {
4349 info->max = 0;
4350 del_timer_sync(&info->timer);
4351 }
4352}
4353
/**
 * ksz_init_timer - prepare a kernel timer without arming it
 * @info: Kernel timer information.
 * @period: The re-arm period in jiffies used by ksz_update_timer().
 * @function: The timer callback.
 * @data: Value passed to the callback.
 *
 * info->max = 0 marks the timer as stopped until ksz_start_timer() runs.
 */
static void ksz_init_timer(struct ksz_timer_info *info, int period,
	void (*function)(unsigned long), void *data)
{
	info->max = 0;
	info->period = period;
	init_timer(&info->timer);
	info->timer.function = function;
	info->timer.data = (unsigned long) data;
}
4363
/**
 * ksz_update_timer - re-arm the timer from its handler
 * @info: Kernel timer information.
 *
 * Called from the timer function.  Re-arms the timer while info->cnt has
 * not reached info->max; a negative info->max means run forever (see
 * ksz_start_timer), zero means stopped.
 */
static void ksz_update_timer(struct ksz_timer_info *info)
{
	++info->cnt;
	if (info->max > 0) {
		if (info->cnt < info->max) {
			info->timer.expires = jiffies + info->period;
			add_timer(&info->timer);
		} else
			/* Finite run finished; mark the timer stopped. */
			info->max = 0;
	} else if (info->max < 0) {
		info->timer.expires = jiffies + info->period;
		add_timer(&info->timer);
	}
}
4378
4379/**
4380 * ksz_alloc_soft_desc - allocate software descriptors
4381 * @desc_info: Descriptor information structure.
4382 * @transmit: Indication that descriptors are for transmit.
4383 *
4384 * This local function allocates software descriptors for manipulation in
4385 * memory.
4386 *
4387 * Return 0 if successful.
4388 */
4389static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
4390{
4391 desc_info->ring = kmalloc(sizeof(struct ksz_desc) * desc_info->alloc,
4392 GFP_KERNEL);
4393 if (!desc_info->ring)
4394 return 1;
4395 memset((void *) desc_info->ring, 0,
4396 sizeof(struct ksz_desc) * desc_info->alloc);
4397 hw_init_desc(desc_info, transmit);
4398 return 0;
4399}
4400
4401/**
4402 * ksz_alloc_desc - allocate hardware descriptors
4403 * @adapter: Adapter information structure.
4404 *
4405 * This local function allocates hardware descriptors for receiving and
4406 * transmitting.
4407 *
4408 * Return 0 if successful.
4409 */
4410static int ksz_alloc_desc(struct dev_info *adapter)
4411{
4412 struct ksz_hw *hw = &adapter->hw;
4413 int offset;
4414
4415 /* Allocate memory for RX & TX descriptors. */
4416 adapter->desc_pool.alloc_size =
4417 hw->rx_desc_info.size * hw->rx_desc_info.alloc +
4418 hw->tx_desc_info.size * hw->tx_desc_info.alloc +
4419 DESC_ALIGNMENT;
4420
4421 adapter->desc_pool.alloc_virt =
4422 pci_alloc_consistent(
4423 adapter->pdev, adapter->desc_pool.alloc_size,
4424 &adapter->desc_pool.dma_addr);
4425 if (adapter->desc_pool.alloc_virt == NULL) {
4426 adapter->desc_pool.alloc_size = 0;
4427 return 1;
4428 }
4429 memset(adapter->desc_pool.alloc_virt, 0, adapter->desc_pool.alloc_size);
4430
4431 /* Align to the next cache line boundary. */
4432 offset = (((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT) ?
4433 (DESC_ALIGNMENT -
4434 ((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT)) : 0);
4435 adapter->desc_pool.virt = adapter->desc_pool.alloc_virt + offset;
4436 adapter->desc_pool.phys = adapter->desc_pool.dma_addr + offset;
4437
4438 /* Allocate receive/transmit descriptors. */
4439 hw->rx_desc_info.ring_virt = (struct ksz_hw_desc *)
4440 adapter->desc_pool.virt;
4441 hw->rx_desc_info.ring_phys = adapter->desc_pool.phys;
4442 offset = hw->rx_desc_info.alloc * hw->rx_desc_info.size;
4443 hw->tx_desc_info.ring_virt = (struct ksz_hw_desc *)
4444 (adapter->desc_pool.virt + offset);
4445 hw->tx_desc_info.ring_phys = adapter->desc_pool.phys + offset;
4446
4447 if (ksz_alloc_soft_desc(&hw->rx_desc_info, 0))
4448 return 1;
4449 if (ksz_alloc_soft_desc(&hw->tx_desc_info, 1))
4450 return 1;
4451
4452 return 0;
4453}
4454
4455/**
4456 * free_dma_buf - release DMA buffer resources
4457 * @adapter: Adapter information structure.
4458 *
4459 * This routine is just a helper function to release the DMA buffer resources.
4460 */
4461static void free_dma_buf(struct dev_info *adapter, struct ksz_dma_buf *dma_buf,
4462 int direction)
4463{
4464 pci_unmap_single(adapter->pdev, dma_buf->dma, dma_buf->len, direction);
4465 dev_kfree_skb(dma_buf->skb);
4466 dma_buf->skb = NULL;
4467 dma_buf->dma = 0;
4468}
4469
4470/**
4471 * ksz_init_rx_buffers - initialize receive descriptors
4472 * @adapter: Adapter information structure.
4473 *
4474 * This routine initializes DMA buffers for receiving.
4475 */
4476static void ksz_init_rx_buffers(struct dev_info *adapter)
4477{
4478 int i;
4479 struct ksz_desc *desc;
4480 struct ksz_dma_buf *dma_buf;
4481 struct ksz_hw *hw = &adapter->hw;
4482 struct ksz_desc_info *info = &hw->rx_desc_info;
4483
4484 for (i = 0; i < hw->rx_desc_info.alloc; i++) {
4485 get_rx_pkt(info, &desc);
4486
4487 dma_buf = DMA_BUFFER(desc);
4488 if (dma_buf->skb && dma_buf->len != adapter->mtu)
4489 free_dma_buf(adapter, dma_buf, PCI_DMA_FROMDEVICE);
4490 dma_buf->len = adapter->mtu;
4491 if (!dma_buf->skb)
4492 dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
4493 if (dma_buf->skb && !dma_buf->dma) {
4494 dma_buf->skb->dev = adapter->dev;
4495 dma_buf->dma = pci_map_single(
4496 adapter->pdev,
4497 skb_tail_pointer(dma_buf->skb),
4498 dma_buf->len,
4499 PCI_DMA_FROMDEVICE);
4500 }
4501
4502 /* Set descriptor. */
4503 set_rx_buf(desc, dma_buf->dma);
4504 set_rx_len(desc, dma_buf->len);
4505 release_desc(desc);
4506 }
4507}
4508
4509/**
4510 * ksz_alloc_mem - allocate memory for hardware descriptors
4511 * @adapter: Adapter information structure.
4512 *
4513 * This function allocates memory for use by hardware descriptors for receiving
4514 * and transmitting.
4515 *
4516 * Return 0 if successful.
4517 */
4518static int ksz_alloc_mem(struct dev_info *adapter)
4519{
4520 struct ksz_hw *hw = &adapter->hw;
4521
4522 /* Determine the number of receive and transmit descriptors. */
4523 hw->rx_desc_info.alloc = NUM_OF_RX_DESC;
4524 hw->tx_desc_info.alloc = NUM_OF_TX_DESC;
4525
4526 /* Determine how many descriptors to skip transmit interrupt. */
4527 hw->tx_int_cnt = 0;
4528 hw->tx_int_mask = NUM_OF_TX_DESC / 4;
4529 if (hw->tx_int_mask > 8)
4530 hw->tx_int_mask = 8;
4531 while (hw->tx_int_mask) {
4532 hw->tx_int_cnt++;
4533 hw->tx_int_mask >>= 1;
4534 }
4535 if (hw->tx_int_cnt) {
4536 hw->tx_int_mask = (1 << (hw->tx_int_cnt - 1)) - 1;
4537 hw->tx_int_cnt = 0;
4538 }
4539
4540 /* Determine the descriptor size. */
4541 hw->rx_desc_info.size =
4542 (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
4543 DESC_ALIGNMENT) * DESC_ALIGNMENT);
4544 hw->tx_desc_info.size =
4545 (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
4546 DESC_ALIGNMENT) * DESC_ALIGNMENT);
4547 if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc))
4548 printk(KERN_ALERT
4549 "Hardware descriptor size not right!\n");
4550 ksz_check_desc_num(&hw->rx_desc_info);
4551 ksz_check_desc_num(&hw->tx_desc_info);
4552
4553 /* Allocate descriptors. */
4554 if (ksz_alloc_desc(adapter))
4555 return 1;
4556
4557 return 0;
4558}
4559
4560/**
4561 * ksz_free_desc - free software and hardware descriptors
4562 * @adapter: Adapter information structure.
4563 *
4564 * This local routine frees the software and hardware descriptors allocated by
4565 * ksz_alloc_desc().
4566 */
4567static void ksz_free_desc(struct dev_info *adapter)
4568{
4569 struct ksz_hw *hw = &adapter->hw;
4570
4571 /* Reset descriptor. */
4572 hw->rx_desc_info.ring_virt = NULL;
4573 hw->tx_desc_info.ring_virt = NULL;
4574 hw->rx_desc_info.ring_phys = 0;
4575 hw->tx_desc_info.ring_phys = 0;
4576
4577 /* Free memory. */
4578 if (adapter->desc_pool.alloc_virt)
4579 pci_free_consistent(
4580 adapter->pdev,
4581 adapter->desc_pool.alloc_size,
4582 adapter->desc_pool.alloc_virt,
4583 adapter->desc_pool.dma_addr);
4584
4585 /* Reset resource pool. */
4586 adapter->desc_pool.alloc_size = 0;
4587 adapter->desc_pool.alloc_virt = NULL;
4588
4589 kfree(hw->rx_desc_info.ring);
4590 hw->rx_desc_info.ring = NULL;
4591 kfree(hw->tx_desc_info.ring);
4592 hw->tx_desc_info.ring = NULL;
4593}
4594
4595/**
4596 * ksz_free_buffers - free buffers used in the descriptors
4597 * @adapter: Adapter information structure.
4598 * @desc_info: Descriptor information structure.
4599 *
4600 * This local routine frees buffers used in the DMA buffers.
4601 */
4602static void ksz_free_buffers(struct dev_info *adapter,
4603 struct ksz_desc_info *desc_info, int direction)
4604{
4605 int i;
4606 struct ksz_dma_buf *dma_buf;
4607 struct ksz_desc *desc = desc_info->ring;
4608
4609 for (i = 0; i < desc_info->alloc; i++) {
4610 dma_buf = DMA_BUFFER(desc);
4611 if (dma_buf->skb)
4612 free_dma_buf(adapter, dma_buf, direction);
4613 desc++;
4614 }
4615}
4616
4617/**
4618 * ksz_free_mem - free all resources used by descriptors
4619 * @adapter: Adapter information structure.
4620 *
4621 * This local routine frees all the resources allocated by ksz_alloc_mem().
4622 */
4623static void ksz_free_mem(struct dev_info *adapter)
4624{
4625 /* Free transmit buffers. */
4626 ksz_free_buffers(adapter, &adapter->hw.tx_desc_info,
4627 PCI_DMA_TODEVICE);
4628
4629 /* Free receive buffers. */
4630 ksz_free_buffers(adapter, &adapter->hw.rx_desc_info,
4631 PCI_DMA_FROMDEVICE);
4632
4633 /* Free descriptors. */
4634 ksz_free_desc(adapter);
4635}
4636
/**
 * get_mib_counters - sum MIB counters across ports
 * @hw: The hardware instance.
 * @first: The first port to include.
 * @cnt: Number of consecutive ports to include.
 * @counter: Output array of TOTAL_PORT_COUNTER_NUM accumulated counters.
 *
 * Zeroes @counter, then adds each included port's counters starting from
 * that port's mib_start index.
 */
static void get_mib_counters(struct ksz_hw *hw, int first, int cnt,
	u64 *counter)
{
	int i;
	int mib;
	int port;
	struct ksz_port_mib *port_mib;

	memset(counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
	for (i = 0, port = first; i < cnt; i++, port++) {
		port_mib = &hw->port_mib[port];
		for (mib = port_mib->mib_start; mib < hw->mib_cnt; mib++)
			counter[mib] += port_mib->counter[mib];
	}
}
4652
4653/**
4654 * send_packet - send packet
4655 * @skb: Socket buffer.
4656 * @dev: Network device.
4657 *
4658 * This routine is used to send a packet out to the network.
4659 */
4660static void send_packet(struct sk_buff *skb, struct net_device *dev)
4661{
4662 struct ksz_desc *desc;
4663 struct ksz_desc *first;
4664 struct dev_priv *priv = netdev_priv(dev);
4665 struct dev_info *hw_priv = priv->adapter;
4666 struct ksz_hw *hw = &hw_priv->hw;
4667 struct ksz_desc_info *info = &hw->tx_desc_info;
4668 struct ksz_dma_buf *dma_buf;
4669 int len;
4670 int last_frag = skb_shinfo(skb)->nr_frags;
4671
4672 /*
4673 * KSZ8842 with multiple device interfaces needs to be told which port
4674 * to send.
4675 */
4676 if (hw->dev_count > 1)
4677 hw->dst_ports = 1 << priv->port.first_port;
4678
4679 /* Hardware will pad the length to 60. */
4680 len = skb->len;
4681
4682 /* Remember the very first descriptor. */
4683 first = info->cur;
4684 desc = first;
4685
4686 dma_buf = DMA_BUFFER(desc);
4687 if (last_frag) {
4688 int frag;
4689 skb_frag_t *this_frag;
4690
4691 dma_buf->len = skb->len - skb->data_len;
4692
4693 dma_buf->dma = pci_map_single(
4694 hw_priv->pdev, skb->data, dma_buf->len,
4695 PCI_DMA_TODEVICE);
4696 set_tx_buf(desc, dma_buf->dma);
4697 set_tx_len(desc, dma_buf->len);
4698
4699 frag = 0;
4700 do {
4701 this_frag = &skb_shinfo(skb)->frags[frag];
4702
4703 /* Get a new descriptor. */
4704 get_tx_pkt(info, &desc);
4705
4706 /* Keep track of descriptors used so far. */
4707 ++hw->tx_int_cnt;
4708
4709 dma_buf = DMA_BUFFER(desc);
4710 dma_buf->len = this_frag->size;
4711
4712 dma_buf->dma = pci_map_single(
4713 hw_priv->pdev,
4714 page_address(this_frag->page) +
4715 this_frag->page_offset,
4716 dma_buf->len,
4717 PCI_DMA_TODEVICE);
4718 set_tx_buf(desc, dma_buf->dma);
4719 set_tx_len(desc, dma_buf->len);
4720
4721 frag++;
4722 if (frag == last_frag)
4723 break;
4724
4725 /* Do not release the last descriptor here. */
4726 release_desc(desc);
4727 } while (1);
4728
4729 /* current points to the last descriptor. */
4730 info->cur = desc;
4731
4732 /* Release the first descriptor. */
4733 release_desc(first);
4734 } else {
4735 dma_buf->len = len;
4736
4737 dma_buf->dma = pci_map_single(
4738 hw_priv->pdev, skb->data, dma_buf->len,
4739 PCI_DMA_TODEVICE);
4740 set_tx_buf(desc, dma_buf->dma);
4741 set_tx_len(desc, dma_buf->len);
4742 }
4743
4744 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4745 (desc)->sw.buf.tx.csum_gen_tcp = 1;
4746 (desc)->sw.buf.tx.csum_gen_udp = 1;
4747 }
4748
4749 /*
4750 * The last descriptor holds the packet so that it can be returned to
4751 * network subsystem after all descriptors are transmitted.
4752 */
4753 dma_buf->skb = skb;
4754
4755 hw_send_pkt(hw);
4756
4757 /* Update transmit statistics. */
4758 priv->stats.tx_packets++;
4759 priv->stats.tx_bytes += len;
4760}
4761
4762/**
4763 * transmit_cleanup - clean up transmit descriptors
4764 * @dev: Network device.
4765 *
4766 * This routine is called to clean up the transmitted buffers.
4767 */
4768static void transmit_cleanup(struct dev_info *hw_priv, int normal)
4769{
4770 int last;
4771 union desc_stat status;
4772 struct ksz_hw *hw = &hw_priv->hw;
4773 struct ksz_desc_info *info = &hw->tx_desc_info;
4774 struct ksz_desc *desc;
4775 struct ksz_dma_buf *dma_buf;
4776 struct net_device *dev = NULL;
4777
4778 spin_lock(&hw_priv->hwlock);
4779 last = info->last;
4780
4781 while (info->avail < info->alloc) {
4782 /* Get next descriptor which is not hardware owned. */
4783 desc = &info->ring[last];
4784 status.data = le32_to_cpu(desc->phw->ctrl.data);
4785 if (status.tx.hw_owned) {
4786 if (normal)
4787 break;
4788 else
4789 reset_desc(desc, status);
4790 }
4791
4792 dma_buf = DMA_BUFFER(desc);
4793 pci_unmap_single(
4794 hw_priv->pdev, dma_buf->dma, dma_buf->len,
4795 PCI_DMA_TODEVICE);
4796
4797 /* This descriptor contains the last buffer in the packet. */
4798 if (dma_buf->skb) {
4799 dev = dma_buf->skb->dev;
4800
4801 /* Release the packet back to network subsystem. */
4802 dev_kfree_skb_irq(dma_buf->skb);
4803 dma_buf->skb = NULL;
4804 }
4805
4806 /* Free the transmitted descriptor. */
4807 last++;
4808 last &= info->mask;
4809 info->avail++;
4810 }
4811 info->last = last;
4812 spin_unlock(&hw_priv->hwlock);
4813
4814 /* Notify the network subsystem that the packet has been sent. */
4815 if (dev)
4816 dev->trans_start = jiffies;
4817}
4818
4819/**
4820 * transmit_done - transmit done processing
4821 * @dev: Network device.
4822 *
4823 * This routine is called when the transmit interrupt is triggered, indicating
4824 * either a packet is sent successfully or there are transmit errors.
4825 */
4826static void tx_done(struct dev_info *hw_priv)
4827{
4828 struct ksz_hw *hw = &hw_priv->hw;
4829 int port;
4830
4831 transmit_cleanup(hw_priv, 1);
4832
4833 for (port = 0; port < hw->dev_count; port++) {
4834 struct net_device *dev = hw->port_info[port].pdev;
4835
4836 if (netif_running(dev) && netif_queue_stopped(dev))
4837 netif_wake_queue(dev);
4838 }
4839}
4840
4841static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
4842{
4843 skb->dev = old->dev;
4844 skb->protocol = old->protocol;
4845 skb->ip_summed = old->ip_summed;
4846 skb->csum = old->csum;
4847 skb_set_network_header(skb, ETH_HLEN);
4848
4849 dev_kfree_skb(old);
4850}
4851
4852/**
4853 * netdev_tx - send out packet
4854 * @skb: Socket buffer.
4855 * @dev: Network device.
4856 *
4857 * This function is used by the upper network layer to send out a packet.
4858 *
4859 * Return 0 if successful; otherwise an error code indicating failure.
4860 */
4861static int netdev_tx(struct sk_buff *skb, struct net_device *dev)
4862{
4863 struct dev_priv *priv = netdev_priv(dev);
4864 struct dev_info *hw_priv = priv->adapter;
4865 struct ksz_hw *hw = &hw_priv->hw;
4866 int left;
4867 int num = 1;
4868 int rc = 0;
4869
4870 if (hw->features & SMALL_PACKET_TX_BUG) {
4871 struct sk_buff *org_skb = skb;
4872
4873 if (skb->len <= 48) {
4874 if (skb_end_pointer(skb) - skb->data >= 50) {
4875 memset(&skb->data[skb->len], 0, 50 - skb->len);
4876 skb->len = 50;
4877 } else {
4878 skb = dev_alloc_skb(50);
4879 if (!skb)
4880 return NETDEV_TX_BUSY;
4881 memcpy(skb->data, org_skb->data, org_skb->len);
4882 memset(&skb->data[org_skb->len], 0,
4883 50 - org_skb->len);
4884 skb->len = 50;
4885 copy_old_skb(org_skb, skb);
4886 }
4887 }
4888 }
4889
4890 spin_lock_irq(&hw_priv->hwlock);
4891
4892 num = skb_shinfo(skb)->nr_frags + 1;
4893 left = hw_alloc_pkt(hw, skb->len, num);
4894 if (left) {
4895 if (left < num ||
4896 ((hw->features & IPV6_CSUM_GEN_HACK) &&
4897 (CHECKSUM_PARTIAL == skb->ip_summed) &&
4898 (ETH_P_IPV6 == htons(skb->protocol)))) {
4899 struct sk_buff *org_skb = skb;
4900
4901 skb = dev_alloc_skb(org_skb->len);
4902 if (!skb)
4903 return NETDEV_TX_BUSY;
4904 skb_copy_and_csum_dev(org_skb, skb->data);
4905 org_skb->ip_summed = 0;
4906 skb->len = org_skb->len;
4907 copy_old_skb(org_skb, skb);
4908 }
4909 send_packet(skb, dev);
4910 if (left <= num)
4911 netif_stop_queue(dev);
4912 } else {
4913 /* Stop the transmit queue until packet is allocated. */
4914 netif_stop_queue(dev);
4915 rc = NETDEV_TX_BUSY;
4916 }
4917
4918 spin_unlock_irq(&hw_priv->hwlock);
4919
4920 return rc;
4921}
4922
4923/**
4924 * netdev_tx_timeout - transmit timeout processing
4925 * @dev: Network device.
4926 *
4927 * This routine is called when the transmit timer expires. That indicates the
4928 * hardware is not running correctly because transmit interrupts are not
4929 * triggered to free up resources so that the transmit routine can continue
4930 * sending out packets. The hardware is reset to correct the problem.
4931 */
4932static void netdev_tx_timeout(struct net_device *dev)
4933{
4934 static unsigned long last_reset;
4935
4936 struct dev_priv *priv = netdev_priv(dev);
4937 struct dev_info *hw_priv = priv->adapter;
4938 struct ksz_hw *hw = &hw_priv->hw;
4939 int port;
4940
4941 if (hw->dev_count > 1) {
4942 /*
4943 * Only reset the hardware if time between calls is long
4944 * enough.
4945 */
4946 if (jiffies - last_reset <= dev->watchdog_timeo)
4947 hw_priv = NULL;
4948 }
4949
4950 last_reset = jiffies;
4951 if (hw_priv) {
4952 hw_dis_intr(hw);
4953 hw_disable(hw);
4954
4955 transmit_cleanup(hw_priv, 0);
4956 hw_reset_pkts(&hw->rx_desc_info);
4957 hw_reset_pkts(&hw->tx_desc_info);
4958 ksz_init_rx_buffers(hw_priv);
4959
4960 hw_reset(hw);
4961
4962 hw_set_desc_base(hw,
4963 hw->tx_desc_info.ring_phys,
4964 hw->rx_desc_info.ring_phys);
4965 hw_set_addr(hw);
4966 if (hw->all_multi)
4967 hw_set_multicast(hw, hw->all_multi);
4968 else if (hw->multi_list_size)
4969 hw_set_grp_addr(hw);
4970
4971 if (hw->dev_count > 1) {
4972 hw_set_add_addr(hw);
4973 for (port = 0; port < SWITCH_PORT_NUM; port++) {
4974 struct net_device *port_dev;
4975
4976 port_set_stp_state(hw, port,
4977 STP_STATE_DISABLED);
4978
4979 port_dev = hw->port_info[port].pdev;
4980 if (netif_running(port_dev))
4981 port_set_stp_state(hw, port,
4982 STP_STATE_SIMPLE);
4983 }
4984 }
4985
4986 hw_enable(hw);
4987 hw_ena_intr(hw);
4988 }
4989
4990 dev->trans_start = jiffies;
4991 netif_wake_queue(dev);
4992}
4993
/*
 * Mark the receive checksum as verified for TCP over IPv4 frames, so the
 * stack skips software verification.
 *
 * NOTE(review): for VLAN frames the IP header struct is deliberately
 * overlaid on the VLAN header: iphdr->tot_len sits at offset 2, which is
 * where the VLAN header carries the encapsulated protocol — presumably
 * that is why tot_len is compared against an Ethernet protocol value
 * below.  Confirm against the 802.1Q header layout before changing.
 */
static inline void csum_verified(struct sk_buff *skb)
{
	unsigned short protocol;
	struct iphdr *iph;

	protocol = skb->protocol;
	skb_reset_network_header(skb);
	iph = (struct iphdr *) skb_network_header(skb);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Read the encapsulated protocol from the VLAN header. */
		protocol = iph->tot_len;
		/* Real IP header starts after the VLAN tag. */
		skb_set_network_header(skb, VLAN_HLEN);
		iph = (struct iphdr *) skb_network_header(skb);
	}
	if (protocol == htons(ETH_P_IP)) {
		if (iph->protocol == IPPROTO_TCP)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
5012
/*
 * Copy one received frame out of its DMA buffer into a fresh socket
 * buffer and hand it to the network stack.
 *
 * Returns 0 on success, -ENOMEM when no socket buffer could be
 * allocated (the descriptor's DMA buffer is left untouched so the
 * caller can retry later).
 */
static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
	struct ksz_desc *desc, union desc_stat status)
{
	int packet_len;
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_dma_buf *dma_buf;
	struct sk_buff *skb;
	int rx_status;

	/* Received length includes 4-byte CRC. */
	packet_len = status.rx.frame_len - 4;

	dma_buf = DMA_BUFFER(desc);
	/* Make the DMA buffer contents visible to the CPU before copying. */
	pci_dma_sync_single_for_cpu(
		hw_priv->pdev, dma_buf->dma, packet_len + 4,
		PCI_DMA_FROMDEVICE);

	do {
		/* skb->data != skb->head */
		skb = dev_alloc_skb(packet_len + 2);
		if (!skb) {
			priv->stats.rx_dropped++;
			return -ENOMEM;
		}

		/*
		 * Align socket buffer in 4-byte boundary for better
		 * performance.
		 */
		skb_reserve(skb, 2);

		memcpy(skb_put(skb, packet_len),
			dma_buf->skb->data, packet_len);
	} while (0);

	skb->dev = dev;

	skb->protocol = eth_type_trans(skb, dev);

	/* Hardware checksum offload was active; mark TCP/IPv4 as verified. */
	if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
		csum_verified(skb);

	/* Update receive statistics. */
	priv->stats.rx_packets++;
	priv->stats.rx_bytes += packet_len;

	/* Notify upper layer for received packet. */
	dev->last_rx = jiffies;

	rx_status = netif_rx(skb);

	return 0;
}
5067
5068static int dev_rcv_packets(struct dev_info *hw_priv)
5069{
5070 int next;
5071 union desc_stat status;
5072 struct ksz_hw *hw = &hw_priv->hw;
5073 struct net_device *dev = hw->port_info[0].pdev;
5074 struct ksz_desc_info *info = &hw->rx_desc_info;
5075 int left = info->alloc;
5076 struct ksz_desc *desc;
5077 int received = 0;
5078
5079 next = info->next;
5080 while (left--) {
5081 /* Get next descriptor which is not hardware owned. */
5082 desc = &info->ring[next];
5083 status.data = le32_to_cpu(desc->phw->ctrl.data);
5084 if (status.rx.hw_owned)
5085 break;
5086
5087 /* Status valid only when last descriptor bit is set. */
5088 if (status.rx.last_desc && status.rx.first_desc) {
5089 if (rx_proc(dev, hw, desc, status))
5090 goto release_packet;
5091 received++;
5092 }
5093
5094release_packet:
5095 release_desc(desc);
5096 next++;
5097 next &= info->mask;
5098 }
5099 info->next = next;
5100
5101 return received;
5102}
5103
/*
 * Receive-ring poll for the multiple device interfaces case: like
 * dev_rcv_packets() but routes each frame to the net_device of the
 * switch port it arrived on, dropping frames for ports that are down.
 * Returns the number of frames delivered to the stack.
 */
static int port_rcv_packets(struct dev_info *hw_priv)
{
	int next;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct net_device *dev = hw->port_info[0].pdev;
	struct ksz_desc_info *info = &hw->rx_desc_info;
	int left = info->alloc;
	struct ksz_desc *desc;
	int received = 0;

	next = info->next;
	while (left--) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[next];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.rx.hw_owned)
			break;

		if (hw->dev_count > 1) {
			/* Get received port number. */
			int p = HW_TO_DEV_PORT(status.rx.src_port);

			dev = hw->port_info[p].pdev;
			if (!netif_running(dev))
				goto release_packet;
		}

		/* Status valid only when last descriptor bit is set. */
		if (status.rx.last_desc && status.rx.first_desc) {
			if (rx_proc(dev, hw, desc, status))
				goto release_packet;
			received++;
		}

release_packet:
		/* Always return the descriptor to hardware and advance. */
		release_desc(desc);
		next++;
		next &= info->mask;
	}
	info->next = next;

	return received;
}
5148
/*
 * Receive-ring poll used when huge-frame support is enabled and receive
 * errors are reported: oversize frames (flagged only with the too-long
 * error condition) are still accepted; any other error is counted as a
 * receive error.  Returns the number of frames delivered to the stack.
 */
static int dev_rcv_special(struct dev_info *hw_priv)
{
	int next;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct net_device *dev = hw->port_info[0].pdev;
	struct ksz_desc_info *info = &hw->rx_desc_info;
	int left = info->alloc;
	struct ksz_desc *desc;
	int received = 0;

	next = info->next;
	while (left--) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[next];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.rx.hw_owned)
			break;

		if (hw->dev_count > 1) {
			/* Get received port number. */
			int p = HW_TO_DEV_PORT(status.rx.src_port);

			dev = hw->port_info[p].pdev;
			if (!netif_running(dev))
				goto release_packet;
		}

		/* Status valid only when last descriptor bit is set. */
		if (status.rx.last_desc && status.rx.first_desc) {
			/*
			 * Receive without error. With receive errors
			 * disabled, packets with receive errors will be
			 * dropped, so no need to check the error bit.
			 */
			if (!status.rx.error || (status.data &
					KS_DESC_RX_ERROR_COND) ==
					KS_DESC_RX_ERROR_TOO_LONG) {
				if (rx_proc(dev, hw, desc, status))
					goto release_packet;
				received++;
			} else {
				struct dev_priv *priv = netdev_priv(dev);

				/* Update receive error statistics. */
				priv->port.counter[OID_COUNTER_RCV_ERROR]++;
			}
		}

release_packet:
		/* Always return the descriptor to hardware and advance. */
		release_desc(desc);
		next++;
		next &= info->mask;
	}
	info->next = next;

	return received;
}
5207
/*
 * Receive tasklet: drain the receive ring via the installed dev_rcv
 * handler.  When the ring is empty, receive is resumed and the receive
 * interrupt is re-enabled; otherwise the tasklet reschedules itself to
 * keep polling.
 */
static void rx_proc_task(unsigned long data)
{
	struct dev_info *hw_priv = (struct dev_info *) data;
	struct ksz_hw *hw = &hw_priv->hw;

	if (!hw->enabled)
		return;
	if (unlikely(!hw_priv->dev_rcv(hw_priv))) {

		/* In case receive process is suspended because of overrun. */
		hw_resume_rx(hw);

		/* tasklets are interruptible. */
		spin_lock_irq(&hw_priv->hwlock);
		hw_turn_on_intr(hw, KS884X_INT_RX_MASK);
		spin_unlock_irq(&hw_priv->hwlock);
	} else {
		/* More work remains; acknowledge and poll again. */
		hw_ack_intr(hw, KS884X_INT_RX);
		tasklet_schedule(&hw_priv->rx_tasklet);
	}
}
5229
/*
 * Transmit tasklet: reclaim completed transmit descriptors and then
 * re-enable the transmit interrupt that was masked in the IRQ handler.
 */
static void tx_proc_task(unsigned long data)
{
	struct dev_info *hw_priv = (struct dev_info *) data;
	struct ksz_hw *hw = &hw_priv->hw;

	hw_ack_intr(hw, KS884X_INT_TX_MASK);

	tx_done(hw_priv);

	/* tasklets are interruptible. */
	spin_lock_irq(&hw_priv->hwlock);
	hw_turn_on_intr(hw, KS884X_INT_TX);
	spin_unlock_irq(&hw_priv->hwlock);
}
5244
/*
 * Handle the receive-stopped interrupt.  hw->rx_stop acts as a small
 * state counter: 0 means the stop was unexpected (mask the interrupt),
 * 1 means receive was just deliberately stopped (advance the counter),
 * and >1 means receive should be restarted if still enabled.
 */
static inline void handle_rx_stop(struct ksz_hw *hw)
{
	/* Receive just has been stopped. */
	if (0 == hw->rx_stop)
		hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
	else if (hw->rx_stop > 1) {
		if (hw->enabled && (hw->rx_cfg & DMA_RX_ENABLE)) {
			hw_start_rx(hw);
		} else {
			/* Receive is disabled; stop watching for this event. */
			hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
			hw->rx_stop = 0;
		}
	} else
		/* Receive just has been started. */
		hw->rx_stop++;
}
5261
5262/**
5263 * netdev_intr - interrupt handling
5264 * @irq: Interrupt number.
5265 * @dev_id: Network device.
5266 *
5267 * This function is called by upper network layer to signal interrupt.
5268 *
5269 * Return IRQ_HANDLED if interrupt is handled.
5270 */
5271static irqreturn_t netdev_intr(int irq, void *dev_id)
5272{
5273 uint int_enable = 0;
5274 struct net_device *dev = (struct net_device *) dev_id;
5275 struct dev_priv *priv = netdev_priv(dev);
5276 struct dev_info *hw_priv = priv->adapter;
5277 struct ksz_hw *hw = &hw_priv->hw;
5278
5279 hw_read_intr(hw, &int_enable);
5280
5281 /* Not our interrupt! */
5282 if (!int_enable)
5283 return IRQ_NONE;
5284
5285 do {
5286 hw_ack_intr(hw, int_enable);
5287 int_enable &= hw->intr_mask;
5288
5289 if (unlikely(int_enable & KS884X_INT_TX_MASK)) {
5290 hw_dis_intr_bit(hw, KS884X_INT_TX_MASK);
5291 tasklet_schedule(&hw_priv->tx_tasklet);
5292 }
5293
5294 if (likely(int_enable & KS884X_INT_RX)) {
5295 hw_dis_intr_bit(hw, KS884X_INT_RX);
5296 tasklet_schedule(&hw_priv->rx_tasklet);
5297 }
5298
5299 if (unlikely(int_enable & KS884X_INT_RX_OVERRUN)) {
5300 priv->stats.rx_fifo_errors++;
5301 hw_resume_rx(hw);
5302 }
5303
5304 if (unlikely(int_enable & KS884X_INT_PHY)) {
5305 struct ksz_port *port = &priv->port;
5306
5307 hw->features |= LINK_INT_WORKING;
5308 port_get_link_speed(port);
5309 }
5310
5311 if (unlikely(int_enable & KS884X_INT_RX_STOPPED)) {
5312 handle_rx_stop(hw);
5313 break;
5314 }
5315
5316 if (unlikely(int_enable & KS884X_INT_TX_STOPPED)) {
5317 u32 data;
5318
5319 hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
5320 printk(KERN_INFO "Tx stopped\n");
5321 data = readl(hw->io + KS_DMA_TX_CTRL);
5322 if (!(data & DMA_TX_ENABLE))
5323 printk(KERN_INFO "Tx disabled\n");
5324 break;
5325 }
5326 } while (0);
5327
5328 hw_ena_intr(hw);
5329
5330 return IRQ_HANDLED;
5331}
5332
5333/*
5334 * Linux network device functions
5335 */
5336
5337static unsigned long next_jiffies;
5338
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll hook: disable hardware interrupts and invoke the interrupt
 * handler directly so the device can be polled with interrupts off.
 */
static void netdev_netpoll(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	hw_dis_intr(&hw_priv->hw);
	netdev_intr(dev->irq, dev);
}
#endif
5349
5350static void bridge_change(struct ksz_hw *hw)
5351{
5352 int port;
5353 u8 member;
5354 struct ksz_switch *sw = hw->ksz_switch;
5355
5356 /* No ports in forwarding state. */
5357 if (!sw->member) {
5358 port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
5359 sw_block_addr(hw);
5360 }
5361 for (port = 0; port < SWITCH_PORT_NUM; port++) {
5362 if (STP_STATE_FORWARDING == sw->port_cfg[port].stp_state)
5363 member = HOST_MASK | sw->member;
5364 else
5365 member = HOST_MASK | (1 << port);
5366 if (member != sw->port_cfg[port].member)
5367 sw_cfg_port_base_vlan(hw, port, member);
5368 }
5369}
5370
5371/**
5372 * netdev_close - close network device
5373 * @dev: Network device.
5374 *
5375 * This function process the close operation of network device. This is caused
5376 * by the user command "ifconfig ethX down."
5377 *
5378 * Return 0 if successful; otherwise an error code indicating failure.
5379 */
5380static int netdev_close(struct net_device *dev)
5381{
5382 struct dev_priv *priv = netdev_priv(dev);
5383 struct dev_info *hw_priv = priv->adapter;
5384 struct ksz_port *port = &priv->port;
5385 struct ksz_hw *hw = &hw_priv->hw;
5386 int pi;
5387
5388 netif_stop_queue(dev);
5389
5390 ksz_stop_timer(&priv->monitor_timer_info);
5391
5392 /* Need to shut the port manually in multiple device interfaces mode. */
5393 if (hw->dev_count > 1) {
5394 port_set_stp_state(hw, port->first_port, STP_STATE_DISABLED);
5395
5396 /* Port is closed. Need to change bridge setting. */
5397 if (hw->features & STP_SUPPORT) {
5398 pi = 1 << port->first_port;
5399 if (hw->ksz_switch->member & pi) {
5400 hw->ksz_switch->member &= ~pi;
5401 bridge_change(hw);
5402 }
5403 }
5404 }
5405 if (port->first_port > 0)
5406 hw_del_addr(hw, dev->dev_addr);
5407 if (!hw_priv->wol_enable)
5408 port_set_power_saving(port, true);
5409
5410 if (priv->multicast)
5411 --hw->all_multi;
5412 if (priv->promiscuous)
5413 --hw->promiscuous;
5414
5415 hw_priv->opened--;
5416 if (!(hw_priv->opened)) {
5417 ksz_stop_timer(&hw_priv->mib_timer_info);
5418 flush_work(&hw_priv->mib_read);
5419
5420 hw_dis_intr(hw);
5421 hw_disable(hw);
5422 hw_clr_multicast(hw);
5423
5424 /* Delay for receive task to stop scheduling itself. */
5425 msleep(2000 / HZ);
5426
5427 tasklet_disable(&hw_priv->rx_tasklet);
5428 tasklet_disable(&hw_priv->tx_tasklet);
5429 free_irq(dev->irq, hw_priv->dev);
5430
5431 transmit_cleanup(hw_priv, 0);
5432 hw_reset_pkts(&hw->rx_desc_info);
5433 hw_reset_pkts(&hw->tx_desc_info);
5434
5435 /* Clean out static MAC table when the switch is shutdown. */
5436 if (hw->features & STP_SUPPORT)
5437 sw_clr_sta_mac_table(hw);
5438 }
5439
5440 return 0;
5441}
5442
/*
 * Configure huge-frame handling: program the switch's huge-packet bit
 * to match the RX_HUGE_FRAME feature and install the matching receive
 * handler (dev_rcv_special accepts too-long frames when errors are
 * reported by hardware).
 */
static void hw_cfg_huge_frame(struct dev_info *hw_priv, struct ksz_hw *hw)
{
	if (hw->ksz_switch) {
		u32 data;

		data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
		if (hw->features & RX_HUGE_FRAME)
			data |= SWITCH_HUGE_PACKET;
		else
			data &= ~SWITCH_HUGE_PACKET;
		writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
	}
	if (hw->features & RX_HUGE_FRAME) {
		/* Report receive errors so oversize frames can be accepted. */
		hw->rx_cfg |= DMA_RX_ERROR;
		hw_priv->dev_rcv = dev_rcv_special;
	} else {
		hw->rx_cfg &= ~DMA_RX_ERROR;
		if (hw->dev_count > 1)
			hw_priv->dev_rcv = port_rcv_packets;
		else
			hw_priv->dev_rcv = dev_rcv_packets;
	}
}
5466
/**
 * prepare_hardware - prepare the hardware for first use
 * @dev: Network device requesting the interrupt.
 *
 * Requests the interrupt line, enables the deferred-work tasklets,
 * resets the chip and programs descriptor bases, MAC address and
 * receive buffers.  Called only when the first interface is opened.
 *
 * Return 0 if successful; otherwise an error code from request_irq().
 */
static int prepare_hardware(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int rc = 0;

	/* Remember the network device that requests interrupts. */
	hw_priv->dev = dev;
	rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
	if (rc)
		return rc;
	tasklet_enable(&hw_priv->rx_tasklet);
	tasklet_enable(&hw_priv->tx_tasklet);

	/* Start from a clean receive-mode state. */
	hw->promiscuous = 0;
	hw->all_multi = 0;
	hw->multi_list_size = 0;

	hw_reset(hw);

	hw_set_desc_base(hw,
		hw->tx_desc_info.ring_phys, hw->rx_desc_info.ring_phys);
	hw_set_addr(hw);
	hw_cfg_huge_frame(hw_priv, hw);
	ksz_init_rx_buffers(hw_priv);
	return 0;
}
5495
5496/**
5497 * netdev_open - open network device
5498 * @dev: Network device.
5499 *
5500 * This function process the open operation of network device. This is caused
5501 * by the user command "ifconfig ethX up."
5502 *
5503 * Return 0 if successful; otherwise an error code indicating failure.
5504 */
5505static int netdev_open(struct net_device *dev)
5506{
5507 struct dev_priv *priv = netdev_priv(dev);
5508 struct dev_info *hw_priv = priv->adapter;
5509 struct ksz_hw *hw = &hw_priv->hw;
5510 struct ksz_port *port = &priv->port;
5511 int i;
5512 int p;
5513 int rc = 0;
5514
5515 priv->multicast = 0;
5516 priv->promiscuous = 0;
5517
5518 /* Reset device statistics. */
5519 memset(&priv->stats, 0, sizeof(struct net_device_stats));
5520 memset((void *) port->counter, 0,
5521 (sizeof(u64) * OID_COUNTER_LAST));
5522
5523 if (!(hw_priv->opened)) {
5524 rc = prepare_hardware(dev);
5525 if (rc)
5526 return rc;
5527 for (i = 0; i < hw->mib_port_cnt; i++) {
5528 if (next_jiffies < jiffies)
5529 next_jiffies = jiffies + HZ * 2;
5530 else
5531 next_jiffies += HZ * 1;
5532 hw_priv->counter[i].time = next_jiffies;
5533 hw->port_mib[i].state = media_disconnected;
5534 port_init_cnt(hw, i);
5535 }
5536 if (hw->ksz_switch)
5537 hw->port_mib[HOST_PORT].state = media_connected;
5538 else {
5539 hw_add_wol_bcast(hw);
5540 hw_cfg_wol_pme(hw, 0);
5541 hw_clr_wol_pme_status(&hw_priv->hw);
5542 }
5543 }
5544 port_set_power_saving(port, false);
5545
5546 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
5547 /*
5548 * Initialize to invalid value so that link detection
5549 * is done.
5550 */
5551 hw->port_info[p].partner = 0xFF;
5552 hw->port_info[p].state = media_disconnected;
5553 }
5554
5555 /* Need to open the port in multiple device interfaces mode. */
5556 if (hw->dev_count > 1) {
5557 port_set_stp_state(hw, port->first_port, STP_STATE_SIMPLE);
5558 if (port->first_port > 0)
5559 hw_add_addr(hw, dev->dev_addr);
5560 }
5561
5562 port_get_link_speed(port);
5563 if (port->force_link)
5564 port_force_link_speed(port);
5565 else
5566 port_set_link_speed(port);
5567
5568 if (!(hw_priv->opened)) {
5569 hw_setup_intr(hw);
5570 hw_enable(hw);
5571 hw_ena_intr(hw);
5572
5573 if (hw->mib_port_cnt)
5574 ksz_start_timer(&hw_priv->mib_timer_info,
5575 hw_priv->mib_timer_info.period);
5576 }
5577
5578 hw_priv->opened++;
5579
5580 ksz_start_timer(&priv->monitor_timer_info,
5581 priv->monitor_timer_info.period);
5582
5583 priv->media_state = port->linked->state;
5584
5585 if (media_connected == priv->media_state)
5586 netif_carrier_on(dev);
5587 else
5588 netif_carrier_off(dev);
5589 if (netif_msg_link(priv))
5590 printk(KERN_INFO "%s link %s\n", dev->name,
5591 (media_connected == priv->media_state ?
5592 "on" : "off"));
5593
5594 netif_start_queue(dev);
5595
5596 return 0;
5597}
5598
5599/* RX errors = rx_errors */
5600/* RX dropped = rx_dropped */
5601/* RX overruns = rx_fifo_errors */
5602/* RX frame = rx_crc_errors + rx_frame_errors + rx_length_errors */
5603/* TX errors = tx_errors */
5604/* TX dropped = tx_dropped */
5605/* TX overruns = tx_fifo_errors */
5606/* TX carrier = tx_aborted_errors + tx_carrier_errors + tx_window_errors */
5607/* collisions = collisions */
5608
5609/**
5610 * netdev_query_statistics - query network device statistics
5611 * @dev: Network device.
5612 *
5613 * This function returns the statistics of the network device. The device
5614 * needs not be opened.
5615 *
5616 * Return network device statistics.
5617 */
5618static struct net_device_stats *netdev_query_statistics(struct net_device *dev)
5619{
5620 struct dev_priv *priv = netdev_priv(dev);
5621 struct ksz_port *port = &priv->port;
5622 struct ksz_hw *hw = &priv->adapter->hw;
5623 struct ksz_port_mib *mib;
5624 int i;
5625 int p;
5626
5627 priv->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR];
5628 priv->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR];
5629
5630 /* Reset to zero to add count later. */
5631 priv->stats.multicast = 0;
5632 priv->stats.collisions = 0;
5633 priv->stats.rx_length_errors = 0;
5634 priv->stats.rx_crc_errors = 0;
5635 priv->stats.rx_frame_errors = 0;
5636 priv->stats.tx_window_errors = 0;
5637
5638 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
5639 mib = &hw->port_mib[p];
5640
5641 priv->stats.multicast += (unsigned long)
5642 mib->counter[MIB_COUNTER_RX_MULTICAST];
5643
5644 priv->stats.collisions += (unsigned long)
5645 mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION];
5646
5647 priv->stats.rx_length_errors += (unsigned long)(
5648 mib->counter[MIB_COUNTER_RX_UNDERSIZE] +
5649 mib->counter[MIB_COUNTER_RX_FRAGMENT] +
5650 mib->counter[MIB_COUNTER_RX_OVERSIZE] +
5651 mib->counter[MIB_COUNTER_RX_JABBER]);
5652 priv->stats.rx_crc_errors += (unsigned long)
5653 mib->counter[MIB_COUNTER_RX_CRC_ERR];
5654 priv->stats.rx_frame_errors += (unsigned long)(
5655 mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] +
5656 mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]);
5657
5658 priv->stats.tx_window_errors += (unsigned long)
5659 mib->counter[MIB_COUNTER_TX_LATE_COLLISION];
5660 }
5661
5662 return &priv->stats;
5663}
5664
5665/**
5666 * netdev_set_mac_address - set network device MAC address
5667 * @dev: Network device.
5668 * @addr: Buffer of MAC address.
5669 *
5670 * This function is used to set the MAC address of the network device.
5671 *
5672 * Return 0 to indicate success.
5673 */
5674static int netdev_set_mac_address(struct net_device *dev, void *addr)
5675{
5676 struct dev_priv *priv = netdev_priv(dev);
5677 struct dev_info *hw_priv = priv->adapter;
5678 struct ksz_hw *hw = &hw_priv->hw;
5679 struct sockaddr *mac = addr;
5680 uint interrupt;
5681
5682 if (priv->port.first_port > 0)
5683 hw_del_addr(hw, dev->dev_addr);
5684 else {
5685 hw->mac_override = 1;
5686 memcpy(hw->override_addr, mac->sa_data, MAC_ADDR_LEN);
5687 }
5688
5689 memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN);
5690
5691 interrupt = hw_block_intr(hw);
5692
5693 if (priv->port.first_port > 0)
5694 hw_add_addr(hw, dev->dev_addr);
5695 else
5696 hw_set_addr(hw);
5697 hw_restore_intr(hw, interrupt);
5698
5699 return 0;
5700}
5701
/*
 * Track per-interface promiscuous state in a shared hardware counter and
 * program the hardware only on the 0 <-> 1 transition.  Leaving
 * promiscuous mode while STP is supported also removes the port from
 * the bridge membership.
 */
static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
	struct ksz_hw *hw, int promiscuous)
{
	if (promiscuous != priv->promiscuous) {
		u8 prev_state = hw->promiscuous;

		if (promiscuous)
			++hw->promiscuous;
		else
			--hw->promiscuous;
		priv->promiscuous = promiscuous;

		/* Turn on/off promiscuous mode. */
		if (hw->promiscuous <= 1 && prev_state <= 1)
			hw_set_promiscuous(hw, hw->promiscuous);

		/*
		 * Port is not in promiscuous mode, meaning it is released
		 * from the bridge.
		 */
		if ((hw->features & STP_SUPPORT) && !promiscuous &&
				dev->br_port) {
			struct ksz_switch *sw = hw->ksz_switch;
			int port = priv->port.first_port;

			port_set_stp_state(hw, port, STP_STATE_DISABLED);
			/* Reuse the variable as the port's membership bit. */
			port = 1 << port;
			if (sw->member & port) {
				sw->member &= ~port;
				bridge_change(hw);
			}
		}
	}
}
5736
5737static void dev_set_multicast(struct dev_priv *priv, struct ksz_hw *hw,
5738 int multicast)
5739{
5740 if (multicast != priv->multicast) {
5741 u8 all_multi = hw->all_multi;
5742
5743 if (multicast)
5744 ++hw->all_multi;
5745 else
5746 --hw->all_multi;
5747 priv->multicast = multicast;
5748
5749 /* Turn on/off all multicast mode. */
5750 if (hw->all_multi <= 1 && all_multi <= 1)
5751 hw_set_multicast(hw, hw->all_multi);
5752 }
5753}
5754
5755/**
5756 * netdev_set_rx_mode
5757 * @dev: Network device.
5758 *
5759 * This routine is used to set multicast addresses or put the network device
5760 * into promiscuous mode.
5761 */
5762static void netdev_set_rx_mode(struct net_device *dev)
5763{
5764 struct dev_priv *priv = netdev_priv(dev);
5765 struct dev_info *hw_priv = priv->adapter;
5766 struct ksz_hw *hw = &hw_priv->hw;
5767 struct dev_mc_list *mc_ptr;
5768 int multicast = (dev->flags & IFF_ALLMULTI);
5769
5770 dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));
5771
5772 if (hw_priv->hw.dev_count > 1)
5773 multicast |= (dev->flags & IFF_MULTICAST);
5774 dev_set_multicast(priv, hw, multicast);
5775
5776 /* Cannot use different hashes in multiple device interfaces mode. */
5777 if (hw_priv->hw.dev_count > 1)
5778 return;
5779
5780 if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
5781 int i = 0;
5782
5783 /* List too big to support so turn on all multicast mode. */
5784 if (dev->mc_count > MAX_MULTICAST_LIST) {
5785 if (MAX_MULTICAST_LIST != hw->multi_list_size) {
5786 hw->multi_list_size = MAX_MULTICAST_LIST;
5787 ++hw->all_multi;
5788 hw_set_multicast(hw, hw->all_multi);
5789 }
5790 return;
5791 }
5792
5793 netdev_for_each_mc_addr(mc_ptr, dev) {
5794 if (!(*mc_ptr->dmi_addr & 1))
5795 continue;
5796 if (i >= MAX_MULTICAST_LIST)
5797 break;
5798 memcpy(hw->multi_list[i++], mc_ptr->dmi_addr,
5799 MAC_ADDR_LEN);
5800 }
5801 hw->multi_list_size = (u8) i;
5802 hw_set_grp_addr(hw);
5803 } else {
5804 if (MAX_MULTICAST_LIST == hw->multi_list_size) {
5805 --hw->all_multi;
5806 hw_set_multicast(hw, hw->all_multi);
5807 }
5808 hw->multi_list_size = 0;
5809 hw_clr_multicast(hw);
5810 }
5811}
5812
/*
 * Change the device MTU.  The device must be down; in multiple device
 * interfaces mode only the primary interface may change the shared
 * receive buffer size.  Returns 0 on success or a negative error code.
 */
static int netdev_change_mtu(struct net_device *dev, int new_mtu)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int hw_mtu;

	if (netif_running(dev))
		return -EBUSY;

	/* Cannot use different MTU in multiple device interfaces mode. */
	if (hw->dev_count > 1)
		if (dev != hw_priv->dev)
			return 0;
	if (new_mtu < 60)
		return -EINVAL;

	if (dev->mtu != new_mtu) {
		/* Add Ethernet header and CRC to get the frame size. */
		hw_mtu = new_mtu + ETHERNET_HEADER_SIZE + 4;
		if (hw_mtu > MAX_RX_BUF_SIZE)
			return -EINVAL;
		if (hw_mtu > REGULAR_RX_BUF_SIZE) {
			hw->features |= RX_HUGE_FRAME;
			hw_mtu = MAX_RX_BUF_SIZE;
		} else {
			hw->features &= ~RX_HUGE_FRAME;
			hw_mtu = REGULAR_RX_BUF_SIZE;
		}
		/* Round the buffer size up to a 4-byte multiple. */
		hw_mtu = (hw_mtu + 3) & ~3;
		hw_priv->mtu = hw_mtu;
		dev->mtu = new_mtu;
	}
	return 0;
}
5847
5848/**
5849 * netdev_ioctl - I/O control processing
5850 * @dev: Network device.
5851 * @ifr: Interface request structure.
5852 * @cmd: I/O control code.
5853 *
5854 * This function is used to process I/O control calls.
5855 *
5856 * Return 0 to indicate success.
5857 */
5858static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5859{
5860 struct dev_priv *priv = netdev_priv(dev);
5861 struct dev_info *hw_priv = priv->adapter;
5862 struct ksz_hw *hw = &hw_priv->hw;
5863 struct ksz_port *port = &priv->port;
5864 int rc;
5865 int result = 0;
5866 struct mii_ioctl_data *data = if_mii(ifr);
5867
5868 if (down_interruptible(&priv->proc_sem))
5869 return -ERESTARTSYS;
5870
5871 /* assume success */
5872 rc = 0;
5873 switch (cmd) {
5874 /* Get address of MII PHY in use. */
5875 case SIOCGMIIPHY:
5876 data->phy_id = priv->id;
5877
5878 /* Fallthrough... */
5879
5880 /* Read MII PHY register. */
5881 case SIOCGMIIREG:
5882 if (data->phy_id != priv->id || data->reg_num >= 6)
5883 result = -EIO;
5884 else
5885 hw_r_phy(hw, port->linked->port_id, data->reg_num,
5886 &data->val_out);
5887 break;
5888
5889 /* Write MII PHY register. */
5890 case SIOCSMIIREG:
5891 if (!capable(CAP_NET_ADMIN))
5892 result = -EPERM;
5893 else if (data->phy_id != priv->id || data->reg_num >= 6)
5894 result = -EIO;
5895 else
5896 hw_w_phy(hw, port->linked->port_id, data->reg_num,
5897 data->val_in);
5898 break;
5899
5900 default:
5901 result = -EOPNOTSUPP;
5902 }
5903
5904 up(&priv->proc_sem);
5905
5906 return result;
5907}
5908
5909/*
5910 * MII support
5911 */
5912
5913/**
5914 * mdio_read - read PHY register
5915 * @dev: Network device.
5916 * @phy_id: The PHY id.
5917 * @reg_num: The register number.
5918 *
5919 * This function returns the PHY register value.
5920 *
5921 * Return the register value.
5922 */
5923static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
5924{
5925 struct dev_priv *priv = netdev_priv(dev);
5926 struct ksz_port *port = &priv->port;
5927 struct ksz_hw *hw = port->hw;
5928 u16 val_out;
5929
5930 hw_r_phy(hw, port->linked->port_id, reg_num << 1, &val_out);
5931 return val_out;
5932}
5933
5934/**
5935 * mdio_write - set PHY register
5936 * @dev: Network device.
5937 * @phy_id: The PHY id.
5938 * @reg_num: The register number.
5939 * @val: The register value.
5940 *
5941 * This procedure sets the PHY register value.
5942 */
5943static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
5944{
5945 struct dev_priv *priv = netdev_priv(dev);
5946 struct ksz_port *port = &priv->port;
5947 struct ksz_hw *hw = port->hw;
5948 int i;
5949 int pi;
5950
5951 for (i = 0, pi = port->first_port; i < port->port_cnt; i++, pi++)
5952 hw_w_phy(hw, pi, reg_num << 1, val);
5953}
5954
5955/*
5956 * ethtool support
5957 */
5958
/* Number of 16-bit words in the EEPROM. */
#define EEPROM_SIZE			0x40

/* Shadow copy of the EEPROM contents used by the ethtool EEPROM routines. */
static u16 eeprom_data[EEPROM_SIZE] = { 0 };

/* All 10/100 link modes this hardware can advertise. */
#define ADVERTISED_ALL			\
	(ADVERTISED_10baseT_Half |	\
	ADVERTISED_10baseT_Full |	\
	ADVERTISED_100baseT_Half |	\
	ADVERTISED_100baseT_Full)
5968
5969/* These functions use the MII functions in mii.c. */
5970
5971/**
5972 * netdev_get_settings - get network device settings
5973 * @dev: Network device.
5974 * @cmd: Ethtool command.
5975 *
5976 * This function queries the PHY and returns its state in the ethtool command.
5977 *
5978 * Return 0 if successful; otherwise an error code.
5979 */
5980static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5981{
5982 struct dev_priv *priv = netdev_priv(dev);
5983 struct dev_info *hw_priv = priv->adapter;
5984
5985 mutex_lock(&hw_priv->lock);
5986 mii_ethtool_gset(&priv->mii_if, cmd);
5987 cmd->advertising |= SUPPORTED_TP;
5988 mutex_unlock(&hw_priv->lock);
5989
5990 /* Save advertised settings for workaround in next function. */
5991 priv->advertising = cmd->advertising;
5992 return 0;
5993}
5994
5995/**
5996 * netdev_set_settings - set network device settings
5997 * @dev: Network device.
5998 * @cmd: Ethtool command.
5999 *
6000 * This function sets the PHY according to the ethtool command.
6001 *
6002 * Return 0 if successful; otherwise an error code.
6003 */
6004static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6005{
6006 struct dev_priv *priv = netdev_priv(dev);
6007 struct dev_info *hw_priv = priv->adapter;
6008 struct ksz_port *port = &priv->port;
6009 int rc;
6010
6011 /*
6012 * ethtool utility does not change advertised setting if auto
6013 * negotiation is not specified explicitly.
6014 */
6015 if (cmd->autoneg && priv->advertising == cmd->advertising) {
6016 cmd->advertising |= ADVERTISED_ALL;
6017 if (10 == cmd->speed)
6018 cmd->advertising &=
6019 ~(ADVERTISED_100baseT_Full |
6020 ADVERTISED_100baseT_Half);
6021 else if (100 == cmd->speed)
6022 cmd->advertising &=
6023 ~(ADVERTISED_10baseT_Full |
6024 ADVERTISED_10baseT_Half);
6025 if (0 == cmd->duplex)
6026 cmd->advertising &=
6027 ~(ADVERTISED_100baseT_Full |
6028 ADVERTISED_10baseT_Full);
6029 else if (1 == cmd->duplex)
6030 cmd->advertising &=
6031 ~(ADVERTISED_100baseT_Half |
6032 ADVERTISED_10baseT_Half);
6033 }
6034 mutex_lock(&hw_priv->lock);
6035 if (cmd->autoneg &&
6036 (cmd->advertising & ADVERTISED_ALL) ==
6037 ADVERTISED_ALL) {
6038 port->duplex = 0;
6039 port->speed = 0;
6040 port->force_link = 0;
6041 } else {
6042 port->duplex = cmd->duplex + 1;
6043 if (cmd->speed != 1000)
6044 port->speed = cmd->speed;
6045 if (cmd->autoneg)
6046 port->force_link = 0;
6047 else
6048 port->force_link = 1;
6049 }
6050 rc = mii_ethtool_sset(&priv->mii_if, cmd);
6051 mutex_unlock(&hw_priv->lock);
6052 return rc;
6053}
6054
6055/**
6056 * netdev_nway_reset - restart auto-negotiation
6057 * @dev: Network device.
6058 *
6059 * This function restarts the PHY for auto-negotiation.
6060 *
6061 * Return 0 if successful; otherwise an error code.
6062 */
6063static int netdev_nway_reset(struct net_device *dev)
6064{
6065 struct dev_priv *priv = netdev_priv(dev);
6066 struct dev_info *hw_priv = priv->adapter;
6067 int rc;
6068
6069 mutex_lock(&hw_priv->lock);
6070 rc = mii_nway_restart(&priv->mii_if);
6071 mutex_unlock(&hw_priv->lock);
6072 return rc;
6073}
6074
6075/**
6076 * netdev_get_link - get network device link status
6077 * @dev: Network device.
6078 *
6079 * This function gets the link status from the PHY.
6080 *
6081 * Return true if PHY is linked and false otherwise.
6082 */
6083static u32 netdev_get_link(struct net_device *dev)
6084{
6085 struct dev_priv *priv = netdev_priv(dev);
6086 int rc;
6087
6088 rc = mii_link_ok(&priv->mii_if);
6089 return rc;
6090}
6091
6092/**
6093 * netdev_get_drvinfo - get network driver information
6094 * @dev: Network device.
6095 * @info: Ethtool driver info data structure.
6096 *
6097 * This procedure returns the driver information.
6098 */
6099static void netdev_get_drvinfo(struct net_device *dev,
6100 struct ethtool_drvinfo *info)
6101{
6102 struct dev_priv *priv = netdev_priv(dev);
6103 struct dev_info *hw_priv = priv->adapter;
6104
6105 strcpy(info->driver, DRV_NAME);
6106 strcpy(info->version, DRV_VERSION);
6107 strcpy(info->bus_info, pci_name(hw_priv->pdev));
6108}
6109
6110/**
6111 * netdev_get_regs_len - get length of register dump
6112 * @dev: Network device.
6113 *
6114 * This function returns the length of the register dump.
6115 *
6116 * Return length of the register dump.
6117 */
6118static struct hw_regs {
6119 int start;
6120 int end;
6121} hw_regs_range[] = {
6122 { KS_DMA_TX_CTRL, KS884X_INTERRUPTS_STATUS },
6123 { KS_ADD_ADDR_0_LO, KS_ADD_ADDR_F_HI },
6124 { KS884X_ADDR_0_OFFSET, KS8841_WOL_FRAME_BYTE2_OFFSET },
6125 { KS884X_SIDER_P, KS8842_SGCR7_P },
6126 { KS8842_MACAR1_P, KS8842_TOSR8_P },
6127 { KS884X_P1MBCR_P, KS8842_P3ERCR_P },
6128 { 0, 0 }
6129};
6130
6131static int netdev_get_regs_len(struct net_device *dev)
6132{
6133 struct hw_regs *range = hw_regs_range;
6134 int regs_len = 0x10 * sizeof(u32);
6135
6136 while (range->end > range->start) {
6137 regs_len += (range->end - range->start + 3) / 4 * 4;
6138 range++;
6139 }
6140 return regs_len;
6141}
6142
6143/**
6144 * netdev_get_regs - get register dump
6145 * @dev: Network device.
6146 * @regs: Ethtool registers data structure.
6147 * @ptr: Buffer to store the register values.
6148 *
6149 * This procedure dumps the register values in the provided buffer.
6150 */
6151static void netdev_get_regs(struct net_device *dev, struct ethtool_regs *regs,
6152 void *ptr)
6153{
6154 struct dev_priv *priv = netdev_priv(dev);
6155 struct dev_info *hw_priv = priv->adapter;
6156 struct ksz_hw *hw = &hw_priv->hw;
6157 int *buf = (int *) ptr;
6158 struct hw_regs *range = hw_regs_range;
6159 int len;
6160
6161 mutex_lock(&hw_priv->lock);
6162 regs->version = 0;
6163 for (len = 0; len < 0x40; len += 4) {
6164 pci_read_config_dword(hw_priv->pdev, len, buf);
6165 buf++;
6166 }
6167 while (range->end > range->start) {
6168 for (len = range->start; len < range->end; len += 4) {
6169 *buf = readl(hw->io + len);
6170 buf++;
6171 }
6172 range++;
6173 }
6174 mutex_unlock(&hw_priv->lock);
6175}
6176
6177#define WOL_SUPPORT \
6178 (WAKE_PHY | WAKE_MAGIC | \
6179 WAKE_UCAST | WAKE_MCAST | \
6180 WAKE_BCAST | WAKE_ARP)
6181
6182/**
6183 * netdev_get_wol - get Wake-on-LAN support
6184 * @dev: Network device.
6185 * @wol: Ethtool Wake-on-LAN data structure.
6186 *
6187 * This procedure returns Wake-on-LAN support.
6188 */
6189static void netdev_get_wol(struct net_device *dev,
6190 struct ethtool_wolinfo *wol)
6191{
6192 struct dev_priv *priv = netdev_priv(dev);
6193 struct dev_info *hw_priv = priv->adapter;
6194
6195 wol->supported = hw_priv->wol_support;
6196 wol->wolopts = hw_priv->wol_enable;
6197 memset(&wol->sopass, 0, sizeof(wol->sopass));
6198}
6199
6200/**
6201 * netdev_set_wol - set Wake-on-LAN support
6202 * @dev: Network device.
6203 * @wol: Ethtool Wake-on-LAN data structure.
6204 *
6205 * This function sets Wake-on-LAN support.
6206 *
6207 * Return 0 if successful; otherwise an error code.
6208 */
6209static int netdev_set_wol(struct net_device *dev,
6210 struct ethtool_wolinfo *wol)
6211{
6212 struct dev_priv *priv = netdev_priv(dev);
6213 struct dev_info *hw_priv = priv->adapter;
6214
6215 /* Need to find a way to retrieve the device IP address. */
6216 u8 net_addr[] = { 192, 168, 1, 1 };
6217
6218 if (wol->wolopts & ~hw_priv->wol_support)
6219 return -EINVAL;
6220
6221 hw_priv->wol_enable = wol->wolopts;
6222
6223 /* Link wakeup cannot really be disabled. */
6224 if (wol->wolopts)
6225 hw_priv->wol_enable |= WAKE_PHY;
6226 hw_enable_wol(&hw_priv->hw, hw_priv->wol_enable, net_addr);
6227 return 0;
6228}
6229
6230/**
6231 * netdev_get_msglevel - get debug message level
6232 * @dev: Network device.
6233 *
6234 * This function returns current debug message level.
6235 *
6236 * Return current debug message flags.
6237 */
6238static u32 netdev_get_msglevel(struct net_device *dev)
6239{
6240 struct dev_priv *priv = netdev_priv(dev);
6241
6242 return priv->msg_enable;
6243}
6244
6245/**
6246 * netdev_set_msglevel - set debug message level
6247 * @dev: Network device.
6248 * @value: Debug message flags.
6249 *
6250 * This procedure sets debug message level.
6251 */
6252static void netdev_set_msglevel(struct net_device *dev, u32 value)
6253{
6254 struct dev_priv *priv = netdev_priv(dev);
6255
6256 priv->msg_enable = value;
6257}
6258
6259/**
6260 * netdev_get_eeprom_len - get EEPROM length
6261 * @dev: Network device.
6262 *
6263 * This function returns the length of the EEPROM.
6264 *
6265 * Return length of the EEPROM.
6266 */
6267static int netdev_get_eeprom_len(struct net_device *dev)
6268{
6269 return EEPROM_SIZE * 2;
6270}
6271
6272/**
6273 * netdev_get_eeprom - get EEPROM data
6274 * @dev: Network device.
6275 * @eeprom: Ethtool EEPROM data structure.
6276 * @data: Buffer to store the EEPROM data.
6277 *
6278 * This function dumps the EEPROM data in the provided buffer.
6279 *
6280 * Return 0 if successful; otherwise an error code.
6281 */
6282#define EEPROM_MAGIC 0x10A18842
6283
6284static int netdev_get_eeprom(struct net_device *dev,
6285 struct ethtool_eeprom *eeprom, u8 *data)
6286{
6287 struct dev_priv *priv = netdev_priv(dev);
6288 struct dev_info *hw_priv = priv->adapter;
6289 u8 *eeprom_byte = (u8 *) eeprom_data;
6290 int i;
6291 int len;
6292
6293 len = (eeprom->offset + eeprom->len + 1) / 2;
6294 for (i = eeprom->offset / 2; i < len; i++)
6295 eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
6296 eeprom->magic = EEPROM_MAGIC;
6297 memcpy(data, &eeprom_byte[eeprom->offset], eeprom->len);
6298
6299 return 0;
6300}
6301
6302/**
6303 * netdev_set_eeprom - write EEPROM data
6304 * @dev: Network device.
6305 * @eeprom: Ethtool EEPROM data structure.
6306 * @data: Data buffer.
6307 *
6308 * This function modifies the EEPROM data one byte at a time.
6309 *
6310 * Return 0 if successful; otherwise an error code.
6311 */
6312static int netdev_set_eeprom(struct net_device *dev,
6313 struct ethtool_eeprom *eeprom, u8 *data)
6314{
6315 struct dev_priv *priv = netdev_priv(dev);
6316 struct dev_info *hw_priv = priv->adapter;
6317 u16 eeprom_word[EEPROM_SIZE];
6318 u8 *eeprom_byte = (u8 *) eeprom_word;
6319 int i;
6320 int len;
6321
6322 if (eeprom->magic != EEPROM_MAGIC)
6323 return 1;
6324
6325 len = (eeprom->offset + eeprom->len + 1) / 2;
6326 for (i = eeprom->offset / 2; i < len; i++)
6327 eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
6328 memcpy(eeprom_word, eeprom_data, EEPROM_SIZE * 2);
6329 memcpy(&eeprom_byte[eeprom->offset], data, eeprom->len);
6330 for (i = 0; i < EEPROM_SIZE; i++)
6331 if (eeprom_word[i] != eeprom_data[i]) {
6332 eeprom_data[i] = eeprom_word[i];
6333 eeprom_write(&hw_priv->hw, i, eeprom_data[i]);
6334 }
6335
6336 return 0;
6337}
6338
6339/**
6340 * netdev_get_pauseparam - get flow control parameters
6341 * @dev: Network device.
6342 * @pause: Ethtool PAUSE settings data structure.
6343 *
6344 * This procedure returns the PAUSE control flow settings.
6345 */
6346static void netdev_get_pauseparam(struct net_device *dev,
6347 struct ethtool_pauseparam *pause)
6348{
6349 struct dev_priv *priv = netdev_priv(dev);
6350 struct dev_info *hw_priv = priv->adapter;
6351 struct ksz_hw *hw = &hw_priv->hw;
6352
6353 pause->autoneg = (hw->overrides & PAUSE_FLOW_CTRL) ? 0 : 1;
6354 if (!hw->ksz_switch) {
6355 pause->rx_pause =
6356 (hw->rx_cfg & DMA_RX_FLOW_ENABLE) ? 1 : 0;
6357 pause->tx_pause =
6358 (hw->tx_cfg & DMA_TX_FLOW_ENABLE) ? 1 : 0;
6359 } else {
6360 pause->rx_pause =
6361 (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
6362 SWITCH_RX_FLOW_CTRL)) ? 1 : 0;
6363 pause->tx_pause =
6364 (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
6365 SWITCH_TX_FLOW_CTRL)) ? 1 : 0;
6366 }
6367}
6368
6369/**
6370 * netdev_set_pauseparam - set flow control parameters
6371 * @dev: Network device.
6372 * @pause: Ethtool PAUSE settings data structure.
6373 *
6374 * This function sets the PAUSE control flow settings.
6375 * Not implemented yet.
6376 *
6377 * Return 0 if successful; otherwise an error code.
6378 */
6379static int netdev_set_pauseparam(struct net_device *dev,
6380 struct ethtool_pauseparam *pause)
6381{
6382 struct dev_priv *priv = netdev_priv(dev);
6383 struct dev_info *hw_priv = priv->adapter;
6384 struct ksz_hw *hw = &hw_priv->hw;
6385 struct ksz_port *port = &priv->port;
6386
6387 mutex_lock(&hw_priv->lock);
6388 if (pause->autoneg) {
6389 if (!pause->rx_pause && !pause->tx_pause)
6390 port->flow_ctrl = PHY_NO_FLOW_CTRL;
6391 else
6392 port->flow_ctrl = PHY_FLOW_CTRL;
6393 hw->overrides &= ~PAUSE_FLOW_CTRL;
6394 port->force_link = 0;
6395 if (hw->ksz_switch) {
6396 sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
6397 SWITCH_RX_FLOW_CTRL, 1);
6398 sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
6399 SWITCH_TX_FLOW_CTRL, 1);
6400 }
6401 port_set_link_speed(port);
6402 } else {
6403 hw->overrides |= PAUSE_FLOW_CTRL;
6404 if (hw->ksz_switch) {
6405 sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
6406 SWITCH_RX_FLOW_CTRL, pause->rx_pause);
6407 sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
6408 SWITCH_TX_FLOW_CTRL, pause->tx_pause);
6409 } else
6410 set_flow_ctrl(hw, pause->rx_pause, pause->tx_pause);
6411 }
6412 mutex_unlock(&hw_priv->lock);
6413
6414 return 0;
6415}
6416
6417/**
6418 * netdev_get_ringparam - get tx/rx ring parameters
6419 * @dev: Network device.
6420 * @pause: Ethtool RING settings data structure.
6421 *
6422 * This procedure returns the TX/RX ring settings.
6423 */
6424static void netdev_get_ringparam(struct net_device *dev,
6425 struct ethtool_ringparam *ring)
6426{
6427 struct dev_priv *priv = netdev_priv(dev);
6428 struct dev_info *hw_priv = priv->adapter;
6429 struct ksz_hw *hw = &hw_priv->hw;
6430
6431 ring->tx_max_pending = (1 << 9);
6432 ring->tx_pending = hw->tx_desc_info.alloc;
6433 ring->rx_max_pending = (1 << 9);
6434 ring->rx_pending = hw->rx_desc_info.alloc;
6435}
6436
/* Number of per-port hardware counters reported through ethtool. */
#define STATS_LEN (TOTAL_PORT_COUNTER_NUM)

/* Statistic names, in hardware MIB counter order. */
static struct {
	char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[STATS_LEN] = {
	{ "rx_lo_priority_octets" },
	{ "rx_hi_priority_octets" },
	{ "rx_undersize_packets" },
	{ "rx_fragments" },
	{ "rx_oversize_packets" },
	{ "rx_jabbers" },
	{ "rx_symbol_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "rx_mac_ctrl_packets" },
	{ "rx_pause_packets" },
	{ "rx_bcast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_ucast_packets" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },

	{ "tx_lo_priority_octets" },
	{ "tx_hi_priority_octets" },
	{ "tx_late_collisions" },
	{ "tx_pause_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_deferred" },
	{ "tx_total_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },

	{ "rx_discards" },
	{ "tx_discards" },
};
6479
6480/**
6481 * netdev_get_strings - get statistics identity strings
6482 * @dev: Network device.
6483 * @stringset: String set identifier.
6484 * @buf: Buffer to store the strings.
6485 *
6486 * This procedure returns the strings used to identify the statistics.
6487 */
6488static void netdev_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6489{
6490 struct dev_priv *priv = netdev_priv(dev);
6491 struct dev_info *hw_priv = priv->adapter;
6492 struct ksz_hw *hw = &hw_priv->hw;
6493
6494 if (ETH_SS_STATS == stringset)
6495 memcpy(buf, &ethtool_stats_keys,
6496 ETH_GSTRING_LEN * hw->mib_cnt);
6497}
6498
6499/**
6500 * netdev_get_sset_count - get statistics size
6501 * @dev: Network device.
6502 * @sset: The statistics set number.
6503 *
6504 * This function returns the size of the statistics to be reported.
6505 *
6506 * Return size of the statistics to be reported.
6507 */
6508static int netdev_get_sset_count(struct net_device *dev, int sset)
6509{
6510 struct dev_priv *priv = netdev_priv(dev);
6511 struct dev_info *hw_priv = priv->adapter;
6512 struct ksz_hw *hw = &hw_priv->hw;
6513
6514 switch (sset) {
6515 case ETH_SS_STATS:
6516 return hw->mib_cnt;
6517 default:
6518 return -EOPNOTSUPP;
6519 }
6520}
6521
6522/**
6523 * netdev_get_ethtool_stats - get network device statistics
6524 * @dev: Network device.
6525 * @stats: Ethtool statistics data structure.
6526 * @data: Buffer to store the statistics.
6527 *
6528 * This procedure returns the statistics.
6529 */
6530static void netdev_get_ethtool_stats(struct net_device *dev,
6531 struct ethtool_stats *stats, u64 *data)
6532{
6533 struct dev_priv *priv = netdev_priv(dev);
6534 struct dev_info *hw_priv = priv->adapter;
6535 struct ksz_hw *hw = &hw_priv->hw;
6536 struct ksz_port *port = &priv->port;
6537 int n_stats = stats->n_stats;
6538 int i;
6539 int n;
6540 int p;
6541 int rc;
6542 u64 counter[TOTAL_PORT_COUNTER_NUM];
6543
6544 mutex_lock(&hw_priv->lock);
6545 n = SWITCH_PORT_NUM;
6546 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
6547 if (media_connected == hw->port_mib[p].state) {
6548 hw_priv->counter[p].read = 1;
6549
6550 /* Remember first port that requests read. */
6551 if (n == SWITCH_PORT_NUM)
6552 n = p;
6553 }
6554 }
6555 mutex_unlock(&hw_priv->lock);
6556
6557 if (n < SWITCH_PORT_NUM)
6558 schedule_work(&hw_priv->mib_read);
6559
6560 if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) {
6561 p = n;
6562 rc = wait_event_interruptible_timeout(
6563 hw_priv->counter[p].counter,
6564 2 == hw_priv->counter[p].read,
6565 HZ * 1);
6566 } else
6567 for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) {
6568 if (0 == i) {
6569 rc = wait_event_interruptible_timeout(
6570 hw_priv->counter[p].counter,
6571 2 == hw_priv->counter[p].read,
6572 HZ * 2);
6573 } else if (hw->port_mib[p].cnt_ptr) {
6574 rc = wait_event_interruptible_timeout(
6575 hw_priv->counter[p].counter,
6576 2 == hw_priv->counter[p].read,
6577 HZ * 1);
6578 }
6579 }
6580
6581 get_mib_counters(hw, port->first_port, port->mib_port_cnt, counter);
6582 n = hw->mib_cnt;
6583 if (n > n_stats)
6584 n = n_stats;
6585 n_stats -= n;
6586 for (i = 0; i < n; i++)
6587 *data++ = counter[i];
6588}
6589
6590/**
6591 * netdev_get_rx_csum - get receive checksum support
6592 * @dev: Network device.
6593 *
6594 * This function gets receive checksum support setting.
6595 *
6596 * Return true if receive checksum is enabled; false otherwise.
6597 */
6598static u32 netdev_get_rx_csum(struct net_device *dev)
6599{
6600 struct dev_priv *priv = netdev_priv(dev);
6601 struct dev_info *hw_priv = priv->adapter;
6602 struct ksz_hw *hw = &hw_priv->hw;
6603
6604 return hw->rx_cfg &
6605 (DMA_RX_CSUM_UDP |
6606 DMA_RX_CSUM_TCP |
6607 DMA_RX_CSUM_IP);
6608}
6609
6610/**
6611 * netdev_set_rx_csum - set receive checksum support
6612 * @dev: Network device.
6613 * @data: Zero to disable receive checksum support.
6614 *
6615 * This function sets receive checksum support setting.
6616 *
6617 * Return 0 if successful; otherwise an error code.
6618 */
6619static int netdev_set_rx_csum(struct net_device *dev, u32 data)
6620{
6621 struct dev_priv *priv = netdev_priv(dev);
6622 struct dev_info *hw_priv = priv->adapter;
6623 struct ksz_hw *hw = &hw_priv->hw;
6624 u32 new_setting = hw->rx_cfg;
6625
6626 if (data)
6627 new_setting |=
6628 (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP |
6629 DMA_RX_CSUM_IP);
6630 else
6631 new_setting &=
6632 ~(DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP |
6633 DMA_RX_CSUM_IP);
6634 new_setting &= ~DMA_RX_CSUM_UDP;
6635 mutex_lock(&hw_priv->lock);
6636 if (new_setting != hw->rx_cfg) {
6637 hw->rx_cfg = new_setting;
6638 if (hw->enabled)
6639 writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
6640 }
6641 mutex_unlock(&hw_priv->lock);
6642 return 0;
6643}
6644
6645static struct ethtool_ops netdev_ethtool_ops = {
6646 .get_settings = netdev_get_settings,
6647 .set_settings = netdev_set_settings,
6648 .nway_reset = netdev_nway_reset,
6649 .get_link = netdev_get_link,
6650 .get_drvinfo = netdev_get_drvinfo,
6651 .get_regs_len = netdev_get_regs_len,
6652 .get_regs = netdev_get_regs,
6653 .get_wol = netdev_get_wol,
6654 .set_wol = netdev_set_wol,
6655 .get_msglevel = netdev_get_msglevel,
6656 .set_msglevel = netdev_set_msglevel,
6657 .get_eeprom_len = netdev_get_eeprom_len,
6658 .get_eeprom = netdev_get_eeprom,
6659 .set_eeprom = netdev_set_eeprom,
6660 .get_pauseparam = netdev_get_pauseparam,
6661 .set_pauseparam = netdev_set_pauseparam,
6662 .get_ringparam = netdev_get_ringparam,
6663 .get_strings = netdev_get_strings,
6664 .get_sset_count = netdev_get_sset_count,
6665 .get_ethtool_stats = netdev_get_ethtool_stats,
6666 .get_rx_csum = netdev_get_rx_csum,
6667 .set_rx_csum = netdev_set_rx_csum,
6668 .get_tx_csum = ethtool_op_get_tx_csum,
6669 .set_tx_csum = ethtool_op_set_tx_csum,
6670 .get_sg = ethtool_op_get_sg,
6671 .set_sg = ethtool_op_set_sg,
6672};
6673
6674/*
6675 * Hardware monitoring
6676 */
6677
6678static void update_link(struct net_device *dev, struct dev_priv *priv,
6679 struct ksz_port *port)
6680{
6681 if (priv->media_state != port->linked->state) {
6682 priv->media_state = port->linked->state;
6683 if (netif_running(dev)) {
6684 if (media_connected == priv->media_state)
6685 netif_carrier_on(dev);
6686 else
6687 netif_carrier_off(dev);
6688 if (netif_msg_link(priv))
6689 printk(KERN_INFO "%s link %s\n", dev->name,
6690 (media_connected == priv->media_state ?
6691 "on" : "off"));
6692 }
6693 }
6694}
6695
/*
 * Walk all MIB ports and read the hardware counters of those that request
 * it (read == 1), marking completion with read == 2 and waking any waiter
 * in netdev_get_ethtool_stats.
 */
static void mib_read_work(struct work_struct *work)
{
	struct dev_info *hw_priv =
		container_of(work, struct dev_info, mib_read);
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port_mib *mib;
	int i;

	/* next_jiffies is file-scope state declared elsewhere in this file. */
	next_jiffies = jiffies;
	for (i = 0; i < hw->mib_port_cnt; i++) {
		mib = &hw->port_mib[i];

		/* Reading MIB counters or requested to read. */
		if (mib->cnt_ptr || 1 == hw_priv->counter[i].read) {

			/* Need to process receive interrupt. */
			if (port_r_cnt(hw, i))
				break;
			hw_priv->counter[i].read = 0;

			/* Finish reading counters. */
			if (0 == mib->cnt_ptr) {
				hw_priv->counter[i].read = 2;
				wake_up_interruptible(
					&hw_priv->counter[i].counter);
			}
		} else if (jiffies >= hw_priv->counter[i].time) {
			/* Only read MIB counters when the port is connected. */
			if (media_connected == mib->state)
				hw_priv->counter[i].read = 1;
			/* Stagger the next periodic read across the ports. */
			next_jiffies += HZ * 1 * hw->mib_port_cnt;
			hw_priv->counter[i].time = next_jiffies;

		/* Port is just disconnected. */
		} else if (mib->link_down) {
			mib->link_down = 0;

			/* Read counters one last time after link is lost. */
			hw_priv->counter[i].read = 1;
		}
	}
}
6738
/*
 * Timer callback: drive the MIB counter reads and manage the
 * Wake-on-LAN PME status, then re-arm the timer.
 */
static void mib_monitor(unsigned long ptr)
{
	struct dev_info *hw_priv = (struct dev_info *) ptr;

	mib_read_work(&hw_priv->mib_read);

	/* This is used to verify Wake-on-LAN is working. */
	if (hw_priv->pme_wait) {
		if (hw_priv->pme_wait <= jiffies) {
			hw_clr_wol_pme_status(&hw_priv->hw);
			hw_priv->pme_wait = 0;
		}
	} else if (hw_chk_wol_pme_status(&hw_priv->hw)) {

		/* PME is asserted.  Wait 2 seconds to clear it. */
		hw_priv->pme_wait = jiffies + HZ * 2;
	}

	ksz_update_timer(&hw_priv->mib_timer_info);
}
6759
6760/**
6761 * dev_monitor - periodic monitoring
6762 * @ptr: Network device pointer.
6763 *
6764 * This routine is run in a kernel timer to monitor the network device.
6765 */
6766static void dev_monitor(unsigned long ptr)
6767{
6768 struct net_device *dev = (struct net_device *) ptr;
6769 struct dev_priv *priv = netdev_priv(dev);
6770 struct dev_info *hw_priv = priv->adapter;
6771 struct ksz_hw *hw = &hw_priv->hw;
6772 struct ksz_port *port = &priv->port;
6773
6774 if (!(hw->features & LINK_INT_WORKING))
6775 port_get_link_speed(port);
6776 update_link(dev, priv, port);
6777
6778 ksz_update_timer(&priv->monitor_timer_info);
6779}
6780
6781/*
6782 * Linux network device interface functions
6783 */
6784
/* Driver exported variables */

/* Debug message level passed to netif_msg_init in netdev_init. */
static int msg_enable;

/* MAC address overrides; ":" means the address is not set. */
static char *macaddr = ":";
static char *mac1addr = ":";

/*
 * This enables multiple network device mode for KSZ8842, which contains a
 * switch with two physical ports. Some users like to take control of the
 * ports for running Spanning Tree Protocol. The driver will create an
 * additional eth? device for the other port.
 *
 * Some limitations are the network devices cannot have different MTU and
 * multicast hash tables.
 */
static int multi_dev;

/*
 * As most users select multiple network device mode to use Spanning Tree
 * Protocol, this enables a feature in which most unicast and multicast packets
 * are forwarded inside the switch and not passed to the host. Only packets
 * that need the host's attention are passed to it. This prevents the host
 * wasting CPU time to examine each and every incoming packets and do the
 * forwarding itself.
 *
 * As the hack requires the private bridge header, the driver cannot compile
 * with just the kernel headers.
 *
 * Enabling STP support also turns on multiple network device mode.
 */
static int stp;

/*
 * This enables fast aging in the KSZ8842 switch. Not sure what situation
 * needs that. However, fast aging is used to flush the dynamic MAC table when
 * STP support is enabled.
 */
static int fast_aging;
6824
6825/**
6826 * netdev_init - initalize network device.
6827 * @dev: Network device.
6828 *
6829 * This function initializes the network device.
6830 *
6831 * Return 0 if successful; otherwise an error code indicating failure.
6832 */
6833static int __init netdev_init(struct net_device *dev)
6834{
6835 struct dev_priv *priv = netdev_priv(dev);
6836
6837 /* 500 ms timeout */
6838 ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000,
6839 dev_monitor, dev);
6840
6841 /* 500 ms timeout */
6842 dev->watchdog_timeo = HZ / 2;
6843
6844 dev->features |= NETIF_F_IP_CSUM;
6845
6846 /*
6847 * Hardware does not really support IPv6 checksum generation, but
6848 * driver actually runs faster with this on. Refer IPV6_CSUM_GEN_HACK.
6849 */
6850 dev->features |= NETIF_F_IPV6_CSUM;
6851 dev->features |= NETIF_F_SG;
6852
6853 sema_init(&priv->proc_sem, 1);
6854
6855 priv->mii_if.phy_id_mask = 0x1;
6856 priv->mii_if.reg_num_mask = 0x7;
6857 priv->mii_if.dev = dev;
6858 priv->mii_if.mdio_read = mdio_read;
6859 priv->mii_if.mdio_write = mdio_write;
6860 priv->mii_if.phy_id = priv->port.first_port + 1;
6861
6862 priv->msg_enable = netif_msg_init(msg_enable,
6863 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK));
6864
6865 return 0;
6866}
6867
/* Network device callbacks exported through dev->netdev_ops. */
static const struct net_device_ops netdev_ops = {
	.ndo_init		= netdev_init,
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_get_stats		= netdev_query_statistics,
	.ndo_start_xmit		= netdev_tx,
	.ndo_tx_timeout		= netdev_tx_timeout,
	.ndo_change_mtu		= netdev_change_mtu,
	.ndo_set_mac_address	= netdev_set_mac_address,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_set_rx_mode	= netdev_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= netdev_netpoll,
#endif
};
6883
6884static void netdev_free(struct net_device *dev)
6885{
6886 if (dev->watchdog_timeo)
6887 unregister_netdev(dev);
6888
6889 free_netdev(dev);
6890}
6891
/*
 * Per-adapter context: shared hardware state plus one net_device per
 * switch port for multiple device interfaces mode.
 */
struct platform_info {
	struct dev_info dev_info;
	struct net_device *netdev[SWITCH_PORT_NUM];
};

/* NOTE(review): maintained elsewhere; appears to count probed devices. */
static int net_device_present;
6898
/*
 * Parse a colon separated hex MAC address string (module parameter) into
 * either the main hardware override address or the switch port address.
 * The hardware MAC override flag is set only when all bytes were parsed.
 */
static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
{
	int i;		/* index into the input string */
	int j;		/* number of completed address bytes */
	int got_num;	/* 1 = digits seen, 2 = current byte complete */
	int num;	/* value of the byte being accumulated */

	i = j = num = got_num = 0;
	while (j < MAC_ADDR_LEN) {
		if (macaddr[i]) {
			got_num = 1;
			if ('0' <= macaddr[i] && macaddr[i] <= '9')
				num = num * 16 + macaddr[i] - '0';
			else if ('A' <= macaddr[i] && macaddr[i] <= 'F')
				num = num * 16 + 10 + macaddr[i] - 'A';
			else if ('a' <= macaddr[i] && macaddr[i] <= 'f')
				num = num * 16 + 10 + macaddr[i] - 'a';
			else if (':' == macaddr[i])
				got_num = 2;
			else
				break;
		} else if (got_num)
			got_num = 2;
		else
			break;
		if (2 == got_num) {
			if (MAIN_PORT == port) {
				hw_priv->hw.override_addr[j++] = (u8) num;
				/*
				 * NOTE(review): the device id is added to the
				 * last byte once per parsed byte, not once per
				 * address -- confirm this is intentional.
				 */
				hw_priv->hw.override_addr[5] +=
					hw_priv->hw.id;
			} else {
				hw_priv->hw.ksz_switch->other_addr[j++] =
					(u8) num;
				hw_priv->hw.ksz_switch->other_addr[5] +=
					hw_priv->hw.id;
			}
			num = got_num = 0;
		}
		i++;
	}
	if (MAC_ADDR_LEN == j) {
		if (MAIN_PORT == port)
			hw_priv->hw.mac_override = 1;
	}
}
6944
6945#define KS884X_DMA_MASK (~0x0UL)
6946
6947static void read_other_addr(struct ksz_hw *hw)
6948{
6949 int i;
6950 u16 data[3];
6951 struct ksz_switch *sw = hw->ksz_switch;
6952
6953 for (i = 0; i < 3; i++)
6954 data[i] = eeprom_read(hw, i + EEPROM_DATA_OTHER_MAC_ADDR);
6955 if ((data[0] || data[1] || data[2]) && data[0] != 0xffff) {
6956 sw->other_addr[5] = (u8) data[0];
6957 sw->other_addr[4] = (u8)(data[0] >> 8);
6958 sw->other_addr[3] = (u8) data[1];
6959 sw->other_addr[2] = (u8)(data[1] >> 8);
6960 sw->other_addr[1] = (u8) data[2];
6961 sw->other_addr[0] = (u8)(data[2] >> 8);
6962 }
6963}
6964
6965#ifndef PCI_VENDOR_ID_MICREL_KS
6966#define PCI_VENDOR_ID_MICREL_KS 0x16c6
6967#endif
6968
6969static int __init pcidev_init(struct pci_dev *pdev,
6970 const struct pci_device_id *id)
6971{
6972 struct net_device *dev;
6973 struct dev_priv *priv;
6974 struct dev_info *hw_priv;
6975 struct ksz_hw *hw;
6976 struct platform_info *info;
6977 struct ksz_port *port;
6978 unsigned long reg_base;
6979 unsigned long reg_len;
6980 int cnt;
6981 int i;
6982 int mib_port_count;
6983 int pi;
6984 int port_count;
6985 int result;
6986 char banner[80];
6987 struct ksz_switch *sw = NULL;
6988
6989 result = pci_enable_device(pdev);
6990 if (result)
6991 return result;
6992
6993 result = -ENODEV;
6994
6995 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
6996 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
6997 return result;
6998
6999 reg_base = pci_resource_start(pdev, 0);
7000 reg_len = pci_resource_len(pdev, 0);
7001 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0)
7002 return result;
7003
7004 if (!request_mem_region(reg_base, reg_len, DRV_NAME))
7005 return result;
7006 pci_set_master(pdev);
7007
7008 result = -ENOMEM;
7009
7010 info = kmalloc(sizeof(struct platform_info), GFP_KERNEL);
7011 if (!info)
7012 goto pcidev_init_dev_err;
7013 memset(info, 0, sizeof(struct platform_info));
7014
7015 hw_priv = &info->dev_info;
7016 hw_priv->pdev = pdev;
7017
7018 hw = &hw_priv->hw;
7019
7020 hw->io = ioremap(reg_base, reg_len);
7021 if (!hw->io)
7022 goto pcidev_init_io_err;
7023
7024 cnt = hw_init(hw);
7025 if (!cnt) {
7026 if (msg_enable & NETIF_MSG_PROBE)
7027 printk(KERN_ALERT "chip not detected\n");
7028 result = -ENODEV;
7029 goto pcidev_init_alloc_err;
7030 }
7031
7032 sprintf(banner, "%s\n", version);
7033 banner[13] = cnt + '0';
7034 ks_info(hw_priv, "%s", banner);
7035 ks_dbg(hw_priv, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
7036
7037 /* Assume device is KSZ8841. */
7038 hw->dev_count = 1;
7039 port_count = 1;
7040 mib_port_count = 1;
7041 hw->addr_list_size = 0;
7042 hw->mib_cnt = PORT_COUNTER_NUM;
7043 hw->mib_port_cnt = 1;
7044
7045 /* KSZ8842 has a switch with multiple ports. */
7046 if (2 == cnt) {
7047 if (fast_aging)
7048 hw->overrides |= FAST_AGING;
7049
7050 hw->mib_cnt = TOTAL_PORT_COUNTER_NUM;
7051
7052 /* Multiple network device interfaces are required. */
7053 if (multi_dev) {
7054 hw->dev_count = SWITCH_PORT_NUM;
7055 hw->addr_list_size = SWITCH_PORT_NUM - 1;
7056 }
7057
7058 /* Single network device has multiple ports. */
7059 if (1 == hw->dev_count) {
7060 port_count = SWITCH_PORT_NUM;
7061 mib_port_count = SWITCH_PORT_NUM;
7062 }
7063 hw->mib_port_cnt = TOTAL_PORT_NUM;
7064 hw->ksz_switch = kmalloc(sizeof(struct ksz_switch), GFP_KERNEL);
7065 if (!hw->ksz_switch)
7066 goto pcidev_init_alloc_err;
7067 memset(hw->ksz_switch, 0, sizeof(struct ksz_switch));
7068
7069 sw = hw->ksz_switch;
7070 }
7071 for (i = 0; i < hw->mib_port_cnt; i++)
7072 hw->port_mib[i].mib_start = 0;
7073
7074 hw->parent = hw_priv;
7075
7076 /* Default MTU is 1500. */
7077 hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3;
7078
7079 if (ksz_alloc_mem(hw_priv))
7080 goto pcidev_init_mem_err;
7081
7082 hw_priv->hw.id = net_device_present;
7083
7084 spin_lock_init(&hw_priv->hwlock);
7085 mutex_init(&hw_priv->lock);
7086
7087 /* tasklet is enabled. */
7088 tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
7089 (unsigned long) hw_priv);
7090 tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
7091 (unsigned long) hw_priv);
7092
7093 /* tasklet_enable will decrement the atomic counter. */
7094 tasklet_disable(&hw_priv->rx_tasklet);
7095 tasklet_disable(&hw_priv->tx_tasklet);
7096
7097 for (i = 0; i < TOTAL_PORT_NUM; i++)
7098 init_waitqueue_head(&hw_priv->counter[i].counter);
7099
7100 if (macaddr[0] != ':')
7101 get_mac_addr(hw_priv, macaddr, MAIN_PORT);
7102
7103 /* Read MAC address and initialize override address if not overrided. */
7104 hw_read_addr(hw);
7105
7106 /* Multiple device interfaces mode requires a second MAC address. */
7107 if (hw->dev_count > 1) {
7108 memcpy(sw->other_addr, hw->override_addr, MAC_ADDR_LEN);
7109 read_other_addr(hw);
7110 if (mac1addr[0] != ':')
7111 get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
7112 }
7113
7114 hw_setup(hw);
7115 if (hw->ksz_switch)
7116 sw_setup(hw);
7117 else {
7118 hw_priv->wol_support = WOL_SUPPORT;
7119 hw_priv->wol_enable = 0;
7120 }
7121
7122 INIT_WORK(&hw_priv->mib_read, mib_read_work);
7123
7124 /* 500 ms timeout */
7125 ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000,
7126 mib_monitor, hw_priv);
7127
7128 for (i = 0; i < hw->dev_count; i++) {
7129 dev = alloc_etherdev(sizeof(struct dev_priv));
7130 if (!dev)
7131 goto pcidev_init_reg_err;
7132 info->netdev[i] = dev;
7133
7134 priv = netdev_priv(dev);
7135 priv->adapter = hw_priv;
7136 priv->id = net_device_present++;
7137
7138 port = &priv->port;
7139 port->port_cnt = port_count;
7140 port->mib_port_cnt = mib_port_count;
7141 port->first_port = i;
7142 port->flow_ctrl = PHY_FLOW_CTRL;
7143
7144 port->hw = hw;
7145 port->linked = &hw->port_info[port->first_port];
7146
7147 for (cnt = 0, pi = i; cnt < port_count; cnt++, pi++) {
7148 hw->port_info[pi].port_id = pi;
7149 hw->port_info[pi].pdev = dev;
7150 hw->port_info[pi].state = media_disconnected;
7151 }
7152
7153 dev->mem_start = (unsigned long) hw->io;
7154 dev->mem_end = dev->mem_start + reg_len - 1;
7155 dev->irq = pdev->irq;
7156 if (MAIN_PORT == i)
7157 memcpy(dev->dev_addr, hw_priv->hw.override_addr,
7158 MAC_ADDR_LEN);
7159 else {
7160 memcpy(dev->dev_addr, sw->other_addr,
7161 MAC_ADDR_LEN);
7162 if (!memcmp(sw->other_addr, hw->override_addr,
7163 MAC_ADDR_LEN))
7164 dev->dev_addr[5] += port->first_port;
7165 }
7166
7167 dev->netdev_ops = &netdev_ops;
7168 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7169 if (register_netdev(dev))
7170 goto pcidev_init_reg_err;
7171 port_set_power_saving(port, true);
7172 }
7173
7174 pci_dev_get(hw_priv->pdev);
7175 pci_set_drvdata(pdev, info);
7176 return 0;
7177
7178pcidev_init_reg_err:
7179 for (i = 0; i < hw->dev_count; i++) {
7180 if (info->netdev[i]) {
7181 netdev_free(info->netdev[i]);
7182 info->netdev[i] = NULL;
7183 }
7184 }
7185
7186pcidev_init_mem_err:
7187 ksz_free_mem(hw_priv);
7188 kfree(hw->ksz_switch);
7189
7190pcidev_init_alloc_err:
7191 iounmap(hw->io);
7192
7193pcidev_init_io_err:
7194 kfree(info);
7195
7196pcidev_init_dev_err:
7197 release_mem_region(reg_base, reg_len);
7198
7199 return result;
7200}
7201
/*
 * pcidev_exit - PCI remove routine; undoes pcidev_init().
 * @pdev:	the PCI device being removed.
 *
 * Unregisters and frees every net_device, unmaps the register window,
 * releases the DMA buffers and switch state, and drops the reference
 * taken on the PCI device at probe time.
 */
static void pcidev_exit(struct pci_dev *pdev)
{
	int i;
	struct platform_info *info = pci_get_drvdata(pdev);
	struct dev_info *hw_priv = &info->dev_info;

	pci_set_drvdata(pdev, NULL);

	/*
	 * NOTE(review): the register region is released before the
	 * interfaces are unregistered below — verify nothing can still
	 * touch the hardware at that point.
	 */
	release_mem_region(pci_resource_start(pdev, 0),
		pci_resource_len(pdev, 0));
	for (i = 0; i < hw_priv->hw.dev_count; i++) {
		if (info->netdev[i])
			netdev_free(info->netdev[i]);
	}
	if (hw_priv->hw.io)
		iounmap(hw_priv->hw.io);
	ksz_free_mem(hw_priv);
	kfree(hw_priv->hw.ksz_switch);
	pci_dev_put(hw_priv->pdev);
	kfree(info);
}
7223
7224#ifdef CONFIG_PM
7225static int pcidev_resume(struct pci_dev *pdev)
7226{
7227 int i;
7228 struct platform_info *info = pci_get_drvdata(pdev);
7229 struct dev_info *hw_priv = &info->dev_info;
7230 struct ksz_hw *hw = &hw_priv->hw;
7231
7232 pci_set_power_state(pdev, PCI_D0);
7233 pci_restore_state(pdev);
7234 pci_enable_wake(pdev, PCI_D0, 0);
7235
7236 if (hw_priv->wol_enable)
7237 hw_cfg_wol_pme(hw, 0);
7238 for (i = 0; i < hw->dev_count; i++) {
7239 if (info->netdev[i]) {
7240 struct net_device *dev = info->netdev[i];
7241
7242 if (netif_running(dev)) {
7243 netdev_open(dev);
7244 netif_device_attach(dev);
7245 }
7246 }
7247 }
7248 return 0;
7249}
7250
7251static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
7252{
7253 int i;
7254 struct platform_info *info = pci_get_drvdata(pdev);
7255 struct dev_info *hw_priv = &info->dev_info;
7256 struct ksz_hw *hw = &hw_priv->hw;
7257
7258 /* Need to find a way to retrieve the device IP address. */
7259 u8 net_addr[] = { 192, 168, 1, 1 };
7260
7261 for (i = 0; i < hw->dev_count; i++) {
7262 if (info->netdev[i]) {
7263 struct net_device *dev = info->netdev[i];
7264
7265 if (netif_running(dev)) {
7266 netif_device_detach(dev);
7267 netdev_close(dev);
7268 }
7269 }
7270 }
7271 if (hw_priv->wol_enable) {
7272 hw_enable_wol(hw, hw_priv->wol_enable, net_addr);
7273 hw_cfg_wol_pme(hw, 1);
7274 }
7275
7276 pci_save_state(pdev);
7277 pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
7278 pci_set_power_state(pdev, pci_choose_state(pdev, state));
7279 return 0;
7280}
7281#endif
7282
/* Driver name registered with the PCI core via pci_device_driver below. */
static char pcidev_name[] = "ksz884xp";
7284
7285static struct pci_device_id pcidev_table[] = {
7286 { PCI_VENDOR_ID_MICREL_KS, 0x8841,
7287 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
7288 { PCI_VENDOR_ID_MICREL_KS, 0x8842,
7289 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
7290 { 0 }
7291};
7292
7293MODULE_DEVICE_TABLE(pci, pcidev_table);
7294
/*
 * PCI driver glue: probe/remove plus optional legacy suspend/resume
 * hooks when power management is configured in.
 */
static struct pci_driver pci_device_driver = {
#ifdef CONFIG_PM
	.suspend	= pcidev_suspend,
	.resume		= pcidev_resume,
#endif
	.name		= pcidev_name,
	.id_table	= pcidev_table,
	.probe		= pcidev_init,
	.remove		= pcidev_exit
};
7305
/* Module entry point: register the PCI driver with the core. */
static int __init ksz884x_init_module(void)
{
	return pci_register_driver(&pci_device_driver);
}
7310
/* Module exit point: unregister the PCI driver. */
static void __exit ksz884x_cleanup_module(void)
{
	pci_unregister_driver(&pci_device_driver);
}
7315
module_init(ksz884x_init_module);
module_exit(ksz884x_cleanup_module);

MODULE_DESCRIPTION("KSZ8841/2 PCI network driver");
MODULE_AUTHOR("Tristram Ha <Tristram.Ha@micrel.com>");
MODULE_LICENSE("GPL");

/* Bitmask of NETIF_MSG_* categories, fed to netif_msg_init() at probe. */
module_param_named(message, msg_enable, int, 0);
MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");

/* macaddr/mac1addr are parsed by get_mac_addr() during pcidev_init(). */
module_param(macaddr, charp, 0);
module_param(mac1addr, charp, 0);
module_param(fast_aging, int, 0);
module_param(multi_dev, int, 0);
module_param(stp, int, 0);
MODULE_PARM_DESC(macaddr, "MAC address");
MODULE_PARM_DESC(mac1addr, "Second MAC address");
MODULE_PARM_DESC(fast_aging, "Fast aging");
MODULE_PARM_DESC(multi_dev, "Multiple device interfaces");
MODULE_PARM_DESC(stp, "STP support");
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index 8d7d3d4625f6..7b9447646f8a 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -1288,7 +1288,7 @@ static void set_multicast_list(struct net_device *dev)
1288 } else { 1288 } else {
1289 short multicast_table[4]; 1289 short multicast_table[4];
1290 int i; 1290 int i;
1291 int num_addrs=dev->mc_count; 1291 int num_addrs=netdev_mc_count(dev);
1292 if(dev->flags&IFF_ALLMULTI) 1292 if(dev->flags&IFF_ALLMULTI)
1293 num_addrs=1; 1293 num_addrs=1;
1294 /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */ 1294 /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index b117f7f8b194..443c39a3732f 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -1094,11 +1094,9 @@ static int __devinit i82596_probe(struct net_device *dev)
1094 return i; 1094 return i;
1095 }; 1095 };
1096 1096
1097 DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,", 1097 DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
1098 dev->name, dev->base_addr)); 1098 dev->name, dev->base_addr, dev->dev_addr,
1099 for (i = 0; i < 6; i++) 1099 dev->irq));
1100 DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i]));
1101 DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));
1102 DEB(DEB_INIT, printk(KERN_INFO 1100 DEB(DEB_INIT, printk(KERN_INFO
1103 "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n", 1101 "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
1104 dev->name, dma, (int)sizeof(struct i596_dma), 1102 dev->name, dma, (int)sizeof(struct i596_dma),
@@ -1382,31 +1380,32 @@ static void set_multicast_list(struct net_device *dev)
1382 } 1380 }
1383 } 1381 }
1384 1382
1385 cnt = dev->mc_count; 1383 cnt = netdev_mc_count(dev);
1386 if (cnt > MAX_MC_CNT) { 1384 if (cnt > MAX_MC_CNT) {
1387 cnt = MAX_MC_CNT; 1385 cnt = MAX_MC_CNT;
1388 printk(KERN_NOTICE "%s: Only %d multicast addresses supported", 1386 printk(KERN_NOTICE "%s: Only %d multicast addresses supported",
1389 dev->name, cnt); 1387 dev->name, cnt);
1390 } 1388 }
1391 1389
1392 if (dev->mc_count > 0) { 1390 if (!netdev_mc_empty(dev)) {
1393 struct dev_mc_list *dmi; 1391 struct dev_mc_list *dmi;
1394 unsigned char *cp; 1392 unsigned char *cp;
1395 struct mc_cmd *cmd; 1393 struct mc_cmd *cmd;
1396 1394
1397 cmd = &dma->mc_cmd; 1395 cmd = &dma->mc_cmd;
1398 cmd->cmd.command = SWAP16(CmdMulticastList); 1396 cmd->cmd.command = SWAP16(CmdMulticastList);
1399 cmd->mc_cnt = SWAP16(dev->mc_count * 6); 1397 cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6);
1400 cp = cmd->mc_addrs; 1398 cp = cmd->mc_addrs;
1401 for (dmi = dev->mc_list; 1399 netdev_for_each_mc_addr(dmi, dev) {
1402 cnt && dmi != NULL; 1400 if (!cnt--)
1403 dmi = dmi->next, cnt--, cp += 6) { 1401 break;
1404 memcpy(cp, dmi->dmi_addr, 6); 1402 memcpy(cp, dmi->dmi_addr, 6);
1405 if (i596_debug > 1) 1403 if (i596_debug > 1)
1406 DEB(DEB_MULTI, 1404 DEB(DEB_MULTI,
1407 printk(KERN_DEBUG 1405 printk(KERN_DEBUG
1408 "%s: Adding address %pM\n", 1406 "%s: Adding address %pM\n",
1409 dev->name, cp)); 1407 dev->name, cp));
1408 cp += 6;
1410 } 1409 }
1411 DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd)); 1410 DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
1412 i596_add_cmd(dev, &cmd->cmd); 1411 i596_add_cmd(dev, &cmd->cmd);
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 57f25848fe80..56f66f485400 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -907,15 +907,8 @@ static inline void make_mc_bits(u8 *bits, struct net_device *dev)
907{ 907{
908 struct dev_mc_list *dmi; 908 struct dev_mc_list *dmi;
909 909
910 for (dmi=dev->mc_list; dmi; dmi=dmi->next) 910 netdev_for_each_mc_addr(dmi, dev) {
911 { 911 u32 crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
912 u32 crc;
913 if (dmi->dmi_addrlen != ETH_ALEN)
914 {
915 printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name);
916 continue;
917 }
918 crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
919 /* 912 /*
920 * The 8390 uses the 6 most significant bits of the 913 * The 8390 uses the 6 most significant bits of the
921 * CRC to index the multicast table. 914 * CRC to index the multicast table.
@@ -941,7 +934,7 @@ static void do_set_multicast_list(struct net_device *dev)
941 if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) 934 if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
942 { 935 {
943 memset(ei_local->mcfilter, 0, 8); 936 memset(ei_local->mcfilter, 0, 8);
944 if (dev->mc_list) 937 if (!netdev_mc_empty(dev))
945 make_mc_bits(ei_local->mcfilter, dev); 938 make_mc_bits(ei_local->mcfilter, dev);
946 } 939 }
947 else 940 else
@@ -975,7 +968,7 @@ static void do_set_multicast_list(struct net_device *dev)
975 968
976 if(dev->flags&IFF_PROMISC) 969 if(dev->flags&IFF_PROMISC)
977 ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR); 970 ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
978 else if(dev->flags&IFF_ALLMULTI || dev->mc_list) 971 else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
979 ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR); 972 ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
980 else 973 else
981 ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); 974 ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index a8522bd73ae7..a18e3485476e 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -224,6 +224,13 @@ static int temac_set_mac_address(struct net_device *ndev, void *address)
224 return 0; 224 return 0;
225} 225}
226 226
227static int netdev_set_mac_address(struct net_device *ndev, void *p)
228{
229 struct sockaddr *addr = p;
230
231 return temac_set_mac_address(ndev, addr->sa_data);
232}
233
227static void temac_set_multicast_list(struct net_device *ndev) 234static void temac_set_multicast_list(struct net_device *ndev)
228{ 235{
229 struct temac_local *lp = netdev_priv(ndev); 236 struct temac_local *lp = netdev_priv(ndev);
@@ -232,7 +239,7 @@ static void temac_set_multicast_list(struct net_device *ndev)
232 239
233 mutex_lock(&lp->indirect_mutex); 240 mutex_lock(&lp->indirect_mutex);
234 if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) || 241 if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
235 ndev->mc_count > MULTICAST_CAM_TABLE_NUM) { 242 netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
236 /* 243 /*
237 * We must make the kernel realise we had to move 244 * We must make the kernel realise we had to move
238 * into promisc mode or we start all out war on 245 * into promisc mode or we start all out war on
@@ -242,10 +249,11 @@ static void temac_set_multicast_list(struct net_device *ndev)
242 ndev->flags |= IFF_PROMISC; 249 ndev->flags |= IFF_PROMISC;
243 temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK); 250 temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
244 dev_info(&ndev->dev, "Promiscuous mode enabled.\n"); 251 dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
245 } else if (ndev->mc_count) { 252 } else if (!netdev_mc_empty(ndev)) {
246 struct dev_mc_list *mclist = ndev->mc_list; 253 struct dev_mc_list *mclist;
247 for (i = 0; mclist && i < ndev->mc_count; i++) {
248 254
255 i = 0;
256 netdev_for_each_mc_addr(mclist, ndev) {
249 if (i >= MULTICAST_CAM_TABLE_NUM) 257 if (i >= MULTICAST_CAM_TABLE_NUM)
250 break; 258 break;
251 multi_addr_msw = ((mclist->dmi_addr[3] << 24) | 259 multi_addr_msw = ((mclist->dmi_addr[3] << 24) |
@@ -258,7 +266,7 @@ static void temac_set_multicast_list(struct net_device *ndev)
258 (mclist->dmi_addr[4]) | (i << 16)); 266 (mclist->dmi_addr[4]) | (i << 16));
259 temac_indirect_out32(lp, XTE_MAW1_OFFSET, 267 temac_indirect_out32(lp, XTE_MAW1_OFFSET,
260 multi_addr_lsw); 268 multi_addr_lsw);
261 mclist = mclist->next; 269 i++;
262 } 270 }
263 } else { 271 } else {
264 val = temac_indirect_in32(lp, XTE_AFM_OFFSET); 272 val = temac_indirect_in32(lp, XTE_AFM_OFFSET);
@@ -615,7 +623,7 @@ static void ll_temac_recv(struct net_device *ndev)
615 while ((bdstat & STS_CTRL_APP0_CMPLT)) { 623 while ((bdstat & STS_CTRL_APP0_CMPLT)) {
616 624
617 skb = lp->rx_skb[lp->rx_bd_ci]; 625 skb = lp->rx_skb[lp->rx_bd_ci];
618 length = cur_p->app4; 626 length = cur_p->app4 & 0x3FFF;
619 627
620 skb_vaddr = virt_to_bus(skb->data); 628 skb_vaddr = virt_to_bus(skb->data);
621 dma_unmap_single(ndev->dev.parent, skb_vaddr, length, 629 dma_unmap_single(ndev->dev.parent, skb_vaddr, length,
@@ -768,7 +776,7 @@ static const struct net_device_ops temac_netdev_ops = {
768 .ndo_open = temac_open, 776 .ndo_open = temac_open,
769 .ndo_stop = temac_stop, 777 .ndo_stop = temac_stop,
770 .ndo_start_xmit = temac_start_xmit, 778 .ndo_start_xmit = temac_start_xmit,
771 .ndo_set_mac_address = temac_set_mac_address, 779 .ndo_set_mac_address = netdev_set_mac_address,
772 //.ndo_set_multicast_list = temac_set_multicast_list, 780 //.ndo_set_multicast_list = temac_set_multicast_list,
773#ifdef CONFIG_NET_POLL_CONTROLLER 781#ifdef CONFIG_NET_POLL_CONTROLLER
774 .ndo_poll_controller = temac_poll_controller, 782 .ndo_poll_controller = temac_poll_controller,
@@ -938,6 +946,9 @@ static int __devexit temac_of_remove(struct of_device *op)
938 946
939static struct of_device_id temac_of_match[] __devinitdata = { 947static struct of_device_id temac_of_match[] __devinitdata = {
940 { .compatible = "xlnx,xps-ll-temac-1.01.b", }, 948 { .compatible = "xlnx,xps-ll-temac-1.01.b", },
949 { .compatible = "xlnx,xps-ll-temac-2.00.a", },
950 { .compatible = "xlnx,xps-ll-temac-2.02.a", },
951 { .compatible = "xlnx,xps-ll-temac-2.03.a", },
941 {}, 952 {},
942}; 953};
943MODULE_DEVICE_TABLE(of, temac_of_match); 954MODULE_DEVICE_TABLE(of, temac_of_match);
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index b9fcc9819837..72b7949c91b1 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -72,7 +72,8 @@ struct pcpu_lstats {
72static netdev_tx_t loopback_xmit(struct sk_buff *skb, 72static netdev_tx_t loopback_xmit(struct sk_buff *skb,
73 struct net_device *dev) 73 struct net_device *dev)
74{ 74{
75 struct pcpu_lstats *pcpu_lstats, *lb_stats; 75 struct pcpu_lstats __percpu *pcpu_lstats;
76 struct pcpu_lstats *lb_stats;
76 int len; 77 int len;
77 78
78 skb_orphan(skb); 79 skb_orphan(skb);
@@ -80,7 +81,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
80 skb->protocol = eth_type_trans(skb, dev); 81 skb->protocol = eth_type_trans(skb, dev);
81 82
82 /* it's OK to use per_cpu_ptr() because BHs are off */ 83 /* it's OK to use per_cpu_ptr() because BHs are off */
83 pcpu_lstats = dev->ml_priv; 84 pcpu_lstats = (void __percpu __force *)dev->ml_priv;
84 lb_stats = this_cpu_ptr(pcpu_lstats); 85 lb_stats = this_cpu_ptr(pcpu_lstats);
85 86
86 len = skb->len; 87 len = skb->len;
@@ -95,14 +96,14 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
95 96
96static struct net_device_stats *loopback_get_stats(struct net_device *dev) 97static struct net_device_stats *loopback_get_stats(struct net_device *dev)
97{ 98{
98 const struct pcpu_lstats *pcpu_lstats; 99 const struct pcpu_lstats __percpu *pcpu_lstats;
99 struct net_device_stats *stats = &dev->stats; 100 struct net_device_stats *stats = &dev->stats;
100 unsigned long bytes = 0; 101 unsigned long bytes = 0;
101 unsigned long packets = 0; 102 unsigned long packets = 0;
102 unsigned long drops = 0; 103 unsigned long drops = 0;
103 int i; 104 int i;
104 105
105 pcpu_lstats = dev->ml_priv; 106 pcpu_lstats = (void __percpu __force *)dev->ml_priv;
106 for_each_possible_cpu(i) { 107 for_each_possible_cpu(i) {
107 const struct pcpu_lstats *lb_stats; 108 const struct pcpu_lstats *lb_stats;
108 109
@@ -135,19 +136,20 @@ static const struct ethtool_ops loopback_ethtool_ops = {
135 136
136static int loopback_dev_init(struct net_device *dev) 137static int loopback_dev_init(struct net_device *dev)
137{ 138{
138 struct pcpu_lstats *lstats; 139 struct pcpu_lstats __percpu *lstats;
139 140
140 lstats = alloc_percpu(struct pcpu_lstats); 141 lstats = alloc_percpu(struct pcpu_lstats);
141 if (!lstats) 142 if (!lstats)
142 return -ENOMEM; 143 return -ENOMEM;
143 144
144 dev->ml_priv = lstats; 145 dev->ml_priv = (void __force *)lstats;
145 return 0; 146 return 0;
146} 147}
147 148
148static void loopback_dev_free(struct net_device *dev) 149static void loopback_dev_free(struct net_device *dev)
149{ 150{
150 struct pcpu_lstats *lstats = dev->ml_priv; 151 struct pcpu_lstats __percpu *lstats =
152 (void __percpu __force *)dev->ml_priv;
151 153
152 free_percpu(lstats); 154 free_percpu(lstats);
153 free_netdev(dev); 155 free_netdev(dev);
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index e20fefc73c8b..3e3cc04defd0 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -1253,21 +1253,22 @@ static void set_multicast_list(struct net_device *dev) {
1253 1253
1254 if (i596_debug > 1) 1254 if (i596_debug > 1)
1255 printk ("%s: set multicast list %d\n", 1255 printk ("%s: set multicast list %d\n",
1256 dev->name, dev->mc_count); 1256 dev->name, netdev_mc_count(dev));
1257 1257
1258 if (dev->mc_count > 0) { 1258 if (!netdev_mc_empty(dev)) {
1259 struct dev_mc_list *dmi; 1259 struct dev_mc_list *dmi;
1260 char *cp; 1260 char *cp;
1261 cmd = kmalloc(sizeof(struct i596_cmd)+2+dev->mc_count*6, GFP_ATOMIC); 1261 cmd = kmalloc(sizeof(struct i596_cmd) + 2 +
1262 netdev_mc_count(dev) * 6, GFP_ATOMIC);
1262 if (cmd == NULL) { 1263 if (cmd == NULL) {
1263 printk (KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name); 1264 printk (KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name);
1264 return; 1265 return;
1265 } 1266 }
1266 cmd->command = CmdMulticastList; 1267 cmd->command = CmdMulticastList;
1267 *((unsigned short *) (cmd + 1)) = dev->mc_count * 6; 1268 *((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6;
1268 cp = ((char *)(cmd + 1))+2; 1269 cp = ((char *)(cmd + 1))+2;
1269 for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) { 1270 netdev_for_each_mc_addr(dmi, dev) {
1270 memcpy(cp, dmi,6); 1271 memcpy(cp, dmi->dmi_addr, 6);
1271 cp += 6; 1272 cp += 6;
1272 } 1273 }
1273 if (i596_debug & LOG_SRCDST) 1274 if (i596_debug & LOG_SRCDST)
@@ -1277,7 +1278,8 @@ static void set_multicast_list(struct net_device *dev) {
1277 if (lp->set_conf.pa_next != I596_NULL) { 1278 if (lp->set_conf.pa_next != I596_NULL) {
1278 return; 1279 return;
1279 } 1280 }
1280 if (dev->mc_count == 0 && !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) { 1281 if (netdev_mc_empty(dev) &&
1282 !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1281 lp->i596_config[8] &= ~0x01; 1283 lp->i596_config[8] &= ~0x01;
1282 } else { 1284 } else {
1283 lp->i596_config[8] |= 0x01; 1285 lp->i596_config[8] |= 0x01;
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index f8fa0c3f0f64..a8768672dc5a 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -17,6 +17,8 @@
17/* 2002-12-30: Try to support more cards, some clues from NetBSD driver */ 17/* 2002-12-30: Try to support more cards, some clues from NetBSD driver */
18/* 2003-12-26: Make sure Asante cards always work. */ 18/* 2003-12-26: Make sure Asante cards always work. */
19 19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
20#include <linux/module.h> 22#include <linux/module.h>
21#include <linux/kernel.h> 23#include <linux/kernel.h>
22#include <linux/types.h> 24#include <linux/types.h>
@@ -34,31 +36,36 @@
34#include <linux/etherdevice.h> 36#include <linux/etherdevice.h>
35#include <linux/skbuff.h> 37#include <linux/skbuff.h>
36#include <linux/bitops.h> 38#include <linux/bitops.h>
39#include <linux/io.h>
37 40
38#include <asm/system.h> 41#include <asm/system.h>
39#include <asm/io.h>
40#include <asm/dma.h> 42#include <asm/dma.h>
41#include <asm/hwtest.h> 43#include <asm/hwtest.h>
42#include <asm/macints.h> 44#include <asm/macints.h>
43 45
44static char version[] = 46static char version[] =
45 "mac8390.c: v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n"; 47 "v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n";
46 48
47#define EI_SHIFT(x) (ei_local->reg_offset[x]) 49#define EI_SHIFT(x) (ei_local->reg_offset[x])
48#define ei_inb(port) in_8(port) 50#define ei_inb(port) in_8(port)
49#define ei_outb(val,port) out_8(port,val) 51#define ei_outb(val, port) out_8(port, val)
50#define ei_inb_p(port) in_8(port) 52#define ei_inb_p(port) in_8(port)
51#define ei_outb_p(val,port) out_8(port,val) 53#define ei_outb_p(val, port) out_8(port, val)
52 54
53#include "lib8390.c" 55#include "lib8390.c"
54 56
55#define WD_START_PG 0x00 /* First page of TX buffer */ 57#define WD_START_PG 0x00 /* First page of TX buffer */
56#define CABLETRON_RX_START_PG 0x00 /* First page of RX buffer */ 58#define CABLETRON_RX_START_PG 0x00 /* First page of RX buffer */
57#define CABLETRON_RX_STOP_PG 0x30 /* Last page +1 of RX ring */ 59#define CABLETRON_RX_STOP_PG 0x30 /* Last page +1 of RX ring */
58#define CABLETRON_TX_START_PG CABLETRON_RX_STOP_PG /* First page of TX buffer */ 60#define CABLETRON_TX_START_PG CABLETRON_RX_STOP_PG
61 /* First page of TX buffer */
59 62
60/* Unfortunately it seems we have to hardcode these for the moment */ 63/*
61/* Shouldn't the card know about this? Does anyone know where to read it off the card? Do we trust the data provided by the card? */ 64 * Unfortunately it seems we have to hardcode these for the moment
65 * Shouldn't the card know about this?
66 * Does anyone know where to read it off the card?
67 * Do we trust the data provided by the card?
68 */
62 69
63#define DAYNA_8390_BASE 0x80000 70#define DAYNA_8390_BASE 0x80000
64#define DAYNA_8390_MEM 0x00000 71#define DAYNA_8390_MEM 0x00000
@@ -80,7 +87,7 @@ enum mac8390_type {
80 MAC8390_KINETICS, 87 MAC8390_KINETICS,
81}; 88};
82 89
83static const char * cardname[] = { 90static const char *cardname[] = {
84 "apple", 91 "apple",
85 "asante", 92 "asante",
86 "farallon", 93 "farallon",
@@ -90,7 +97,7 @@ static const char * cardname[] = {
90 "kinetics", 97 "kinetics",
91}; 98};
92 99
93static int word16[] = { 100static const int word16[] = {
94 1, /* apple */ 101 1, /* apple */
95 1, /* asante */ 102 1, /* asante */
96 1, /* farallon */ 103 1, /* farallon */
@@ -101,7 +108,7 @@ static int word16[] = {
101}; 108};
102 109
103/* on which cards do we use NuBus resources? */ 110/* on which cards do we use NuBus resources? */
104static int useresources[] = { 111static const int useresources[] = {
105 1, /* apple */ 112 1, /* apple */
106 1, /* asante */ 113 1, /* asante */
107 1, /* farallon */ 114 1, /* farallon */
@@ -117,22 +124,22 @@ enum mac8390_access {
117 ACCESS_16, 124 ACCESS_16,
118}; 125};
119 126
120extern int mac8390_memtest(struct net_device * dev); 127extern int mac8390_memtest(struct net_device *dev);
121static int mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev, 128static int mac8390_initdev(struct net_device *dev, struct nubus_dev *ndev,
122 enum mac8390_type type); 129 enum mac8390_type type);
123 130
124static int mac8390_open(struct net_device * dev); 131static int mac8390_open(struct net_device *dev);
125static int mac8390_close(struct net_device * dev); 132static int mac8390_close(struct net_device *dev);
126static void mac8390_no_reset(struct net_device *dev); 133static void mac8390_no_reset(struct net_device *dev);
127static void interlan_reset(struct net_device *dev); 134static void interlan_reset(struct net_device *dev);
128 135
129/* Sane (32-bit chunk memory read/write) - Some Farallon and Apple do this*/ 136/* Sane (32-bit chunk memory read/write) - Some Farallon and Apple do this*/
130static void sane_get_8390_hdr(struct net_device *dev, 137static void sane_get_8390_hdr(struct net_device *dev,
131 struct e8390_pkt_hdr *hdr, int ring_page); 138 struct e8390_pkt_hdr *hdr, int ring_page);
132static void sane_block_input(struct net_device * dev, int count, 139static void sane_block_input(struct net_device *dev, int count,
133 struct sk_buff * skb, int ring_offset); 140 struct sk_buff *skb, int ring_offset);
134static void sane_block_output(struct net_device * dev, int count, 141static void sane_block_output(struct net_device *dev, int count,
135 const unsigned char * buf, const int start_page); 142 const unsigned char *buf, const int start_page);
136 143
137/* dayna_memcpy to and from card */ 144/* dayna_memcpy to and from card */
138static void dayna_memcpy_fromcard(struct net_device *dev, void *to, 145static void dayna_memcpy_fromcard(struct net_device *dev, void *to,
@@ -148,8 +155,8 @@ static void dayna_block_input(struct net_device *dev, int count,
148static void dayna_block_output(struct net_device *dev, int count, 155static void dayna_block_output(struct net_device *dev, int count,
149 const unsigned char *buf, int start_page); 156 const unsigned char *buf, int start_page);
150 157
151#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c)) 158#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
152#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c)) 159#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
153 160
154/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */ 161/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
155static void slow_sane_get_8390_hdr(struct net_device *dev, 162static void slow_sane_get_8390_hdr(struct net_device *dev,
@@ -164,70 +171,72 @@ static void word_memcpy_fromcard(void *tp, const void *fp, int count);
164static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev) 171static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
165{ 172{
166 switch (dev->dr_sw) { 173 switch (dev->dr_sw) {
167 case NUBUS_DRSW_3COM: 174 case NUBUS_DRSW_3COM:
168 switch (dev->dr_hw) { 175 switch (dev->dr_hw) {
169 case NUBUS_DRHW_APPLE_SONIC_NB: 176 case NUBUS_DRHW_APPLE_SONIC_NB:
170 case NUBUS_DRHW_APPLE_SONIC_LC: 177 case NUBUS_DRHW_APPLE_SONIC_LC:
171 case NUBUS_DRHW_SONNET: 178 case NUBUS_DRHW_SONNET:
172 return MAC8390_NONE; 179 return MAC8390_NONE;
173 break;
174 default:
175 return MAC8390_APPLE;
176 break;
177 }
178 break; 180 break;
179 181 default:
180 case NUBUS_DRSW_APPLE: 182 return MAC8390_APPLE;
181 switch (dev->dr_hw) {
182 case NUBUS_DRHW_ASANTE_LC:
183 return MAC8390_NONE;
184 break;
185 case NUBUS_DRHW_CABLETRON:
186 return MAC8390_CABLETRON;
187 break;
188 default:
189 return MAC8390_APPLE;
190 break;
191 }
192 break; 183 break;
184 }
185 break;
193 186
194 case NUBUS_DRSW_ASANTE: 187 case NUBUS_DRSW_APPLE:
195 return MAC8390_ASANTE; 188 switch (dev->dr_hw) {
189 case NUBUS_DRHW_ASANTE_LC:
190 return MAC8390_NONE;
196 break; 191 break;
197 192 case NUBUS_DRHW_CABLETRON:
198 case NUBUS_DRSW_TECHWORKS: 193 return MAC8390_CABLETRON;
199 case NUBUS_DRSW_DAYNA2:
200 case NUBUS_DRSW_DAYNA_LC:
201 if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
202 return MAC8390_CABLETRON;
203 else
204 return MAC8390_APPLE;
205 break; 194 break;
206 195 default:
207 case NUBUS_DRSW_FARALLON: 196 return MAC8390_APPLE;
208 return MAC8390_FARALLON;
209 break; 197 break;
198 }
199 break;
210 200
211 case NUBUS_DRSW_KINETICS: 201 case NUBUS_DRSW_ASANTE:
212 switch (dev->dr_hw) { 202 return MAC8390_ASANTE;
213 case NUBUS_DRHW_INTERLAN: 203 break;
214 return MAC8390_INTERLAN;
215 break;
216 default:
217 return MAC8390_KINETICS;
218 break;
219 }
220 break;
221 204
222 case NUBUS_DRSW_DAYNA: 205 case NUBUS_DRSW_TECHWORKS:
223 // These correspond to Dayna Sonic cards 206 case NUBUS_DRSW_DAYNA2:
224 // which use the macsonic driver 207 case NUBUS_DRSW_DAYNA_LC:
225 if (dev->dr_hw == NUBUS_DRHW_SMC9194 || 208 if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
226 dev->dr_hw == NUBUS_DRHW_INTERLAN ) 209 return MAC8390_CABLETRON;
227 return MAC8390_NONE; 210 else
228 else 211 return MAC8390_APPLE;
229 return MAC8390_DAYNA; 212 break;
213
214 case NUBUS_DRSW_FARALLON:
215 return MAC8390_FARALLON;
216 break;
217
218 case NUBUS_DRSW_KINETICS:
219 switch (dev->dr_hw) {
220 case NUBUS_DRHW_INTERLAN:
221 return MAC8390_INTERLAN;
222 break;
223 default:
224 return MAC8390_KINETICS;
230 break; 225 break;
226 }
227 break;
228
229 case NUBUS_DRSW_DAYNA:
230 /*
231 * These correspond to Dayna Sonic cards
232 * which use the macsonic driver
233 */
234 if (dev->dr_hw == NUBUS_DRHW_SMC9194 ||
235 dev->dr_hw == NUBUS_DRHW_INTERLAN)
236 return MAC8390_NONE;
237 else
238 return MAC8390_DAYNA;
239 break;
231 } 240 }
232 return MAC8390_NONE; 241 return MAC8390_NONE;
233} 242}
@@ -237,14 +246,14 @@ static enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
237 unsigned long outdata = 0xA5A0B5B0; 246 unsigned long outdata = 0xA5A0B5B0;
238 unsigned long indata = 0x00000000; 247 unsigned long indata = 0x00000000;
239 /* Try writing 32 bits */ 248 /* Try writing 32 bits */
240 memcpy((char *)membase, (char *)&outdata, 4); 249 memcpy(membase, &outdata, 4);
241 /* Now compare them */ 250 /* Now compare them */
242 if (memcmp((char *)&outdata, (char *)membase, 4) == 0) 251 if (memcmp((char *)&outdata, (char *)membase, 4) == 0)
243 return ACCESS_32; 252 return ACCESS_32;
244 /* Write 16 bit output */ 253 /* Write 16 bit output */
245 word_memcpy_tocard((char *)membase, (char *)&outdata, 4); 254 word_memcpy_tocard(membase, &outdata, 4);
246 /* Now read it back */ 255 /* Now read it back */
247 word_memcpy_fromcard((char *)&indata, (char *)membase, 4); 256 word_memcpy_fromcard(&indata, membase, 4);
248 if (outdata == indata) 257 if (outdata == indata)
249 return ACCESS_16; 258 return ACCESS_16;
250 return ACCESS_UNKNOWN; 259 return ACCESS_UNKNOWN;
@@ -258,7 +267,7 @@ static int __init mac8390_memsize(unsigned long membase)
258 local_irq_save(flags); 267 local_irq_save(flags);
259 /* Check up to 32K in 4K increments */ 268 /* Check up to 32K in 4K increments */
260 for (i = 0; i < 8; i++) { 269 for (i = 0; i < 8; i++) {
261 volatile unsigned short *m = (unsigned short *) (membase + (i * 0x1000)); 270 volatile unsigned short *m = (unsigned short *)(membase + (i * 0x1000));
262 271
263 /* Unwriteable - we have a fully decoded card and the 272 /* Unwriteable - we have a fully decoded card and the
264 RAM end located */ 273 RAM end located */
@@ -273,28 +282,127 @@ static int __init mac8390_memsize(unsigned long membase)
273 282
274 /* check for partial decode and wrap */ 283 /* check for partial decode and wrap */
275 for (j = 0; j < i; j++) { 284 for (j = 0; j < i; j++) {
276 volatile unsigned short *p = (unsigned short *) (membase + (j * 0x1000)); 285 volatile unsigned short *p = (unsigned short *)(membase + (j * 0x1000));
277 if (*p != (0xA5A0 | j)) 286 if (*p != (0xA5A0 | j))
278 break; 287 break;
279 } 288 }
280 } 289 }
281 local_irq_restore(flags); 290 local_irq_restore(flags);
282 /* in any case, we stopped once we tried one block too many, 291 /*
283 or once we reached 32K */ 292 * in any case, we stopped once we tried one block too many,
284 return i * 0x1000; 293 * or once we reached 32K
294 */
295 return i * 0x1000;
296}
297
298static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev,
299 enum mac8390_type cardtype)
300{
301 struct nubus_dir dir;
302 struct nubus_dirent ent;
303 int offset;
304 volatile unsigned short *i;
305
306 printk_once(KERN_INFO pr_fmt("%s"), version);
307
308 dev->irq = SLOT2IRQ(ndev->board->slot);
309 /* This is getting to be a habit */
310 dev->base_addr = (ndev->board->slot_addr |
311 ((ndev->board->slot & 0xf) << 20));
312
313 /*
314 * Get some Nubus info - we will trust the card's idea
315 * of where its memory and registers are.
316 */
317
318 if (nubus_get_func_dir(ndev, &dir) == -1) {
319 pr_err("%s: Unable to get Nubus functional directory for slot %X!\n",
320 dev->name, ndev->board->slot);
321 return false;
322 }
323
324 /* Get the MAC address */
325 if (nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent) == -1) {
326 pr_info("%s: Couldn't get MAC address!\n", dev->name);
327 return false;
328 }
329
330 nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
331
332 if (useresources[cardtype] == 1) {
333 nubus_rewinddir(&dir);
334 if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS,
335 &ent) == -1) {
336 pr_err("%s: Memory offset resource for slot %X not found!\n",
337 dev->name, ndev->board->slot);
338 return false;
339 }
340 nubus_get_rsrc_mem(&offset, &ent, 4);
341 dev->mem_start = dev->base_addr + offset;
342 /* yes, this is how the Apple driver does it */
343 dev->base_addr = dev->mem_start + 0x10000;
344 nubus_rewinddir(&dir);
345 if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH,
346 &ent) == -1) {
347 pr_info("%s: Memory length resource for slot %X not found, probing\n",
348 dev->name, ndev->board->slot);
349 offset = mac8390_memsize(dev->mem_start);
350 } else {
351 nubus_get_rsrc_mem(&offset, &ent, 4);
352 }
353 dev->mem_end = dev->mem_start + offset;
354 } else {
355 switch (cardtype) {
356 case MAC8390_KINETICS:
357 case MAC8390_DAYNA: /* it's the same */
358 dev->base_addr = (int)(ndev->board->slot_addr +
359 DAYNA_8390_BASE);
360 dev->mem_start = (int)(ndev->board->slot_addr +
361 DAYNA_8390_MEM);
362 dev->mem_end = dev->mem_start +
363 mac8390_memsize(dev->mem_start);
364 break;
365 case MAC8390_INTERLAN:
366 dev->base_addr = (int)(ndev->board->slot_addr +
367 INTERLAN_8390_BASE);
368 dev->mem_start = (int)(ndev->board->slot_addr +
369 INTERLAN_8390_MEM);
370 dev->mem_end = dev->mem_start +
371 mac8390_memsize(dev->mem_start);
372 break;
373 case MAC8390_CABLETRON:
374 dev->base_addr = (int)(ndev->board->slot_addr +
375 CABLETRON_8390_BASE);
376 dev->mem_start = (int)(ndev->board->slot_addr +
377 CABLETRON_8390_MEM);
378 /* The base address is unreadable if 0x00
379 * has been written to the command register
380 * Reset the chip by writing E8390_NODMA +
381 * E8390_PAGE0 + E8390_STOP just to be
382 * sure
383 */
384 i = (void *)dev->base_addr;
385 *i = 0x21;
386 dev->mem_end = dev->mem_start +
387 mac8390_memsize(dev->mem_start);
388 break;
389
390 default:
391 pr_err("Card type %s is unsupported, sorry\n",
392 ndev->board->name);
393 return false;
394 }
395 }
396
397 return true;
285} 398}
286 399
287struct net_device * __init mac8390_probe(int unit) 400struct net_device * __init mac8390_probe(int unit)
288{ 401{
289 struct net_device *dev; 402 struct net_device *dev;
290 volatile unsigned short *i; 403 struct nubus_dev *ndev = NULL;
291 int version_disp = 0;
292 struct nubus_dev * ndev = NULL;
293 int err = -ENODEV; 404 int err = -ENODEV;
294 405
295 struct nubus_dir dir;
296 struct nubus_dirent ent;
297 int offset;
298 static unsigned int slots; 406 static unsigned int slots;
299 407
300 enum mac8390_type cardtype; 408 enum mac8390_type cardtype;
@@ -311,118 +419,19 @@ struct net_device * __init mac8390_probe(int unit)
311 if (unit >= 0) 419 if (unit >= 0)
312 sprintf(dev->name, "eth%d", unit); 420 sprintf(dev->name, "eth%d", unit);
313 421
314 while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET, ndev))) { 422 while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET,
423 ndev))) {
315 /* Have we seen it already? */ 424 /* Have we seen it already? */
316 if (slots & (1<<ndev->board->slot)) 425 if (slots & (1 << ndev->board->slot))
317 continue; 426 continue;
318 slots |= 1<<ndev->board->slot; 427 slots |= 1 << ndev->board->slot;
319 428
320 if ((cardtype = mac8390_ident(ndev)) == MAC8390_NONE) 429 cardtype = mac8390_ident(ndev);
430 if (cardtype == MAC8390_NONE)
321 continue; 431 continue;
322 432
323 if (version_disp == 0) { 433 if (!mac8390_init(dev, ndev, cardtype))
324 version_disp = 1;
325 printk(version);
326 }
327
328 dev->irq = SLOT2IRQ(ndev->board->slot);
329 /* This is getting to be a habit */
330 dev->base_addr = ndev->board->slot_addr | ((ndev->board->slot&0xf) << 20);
331
332 /* Get some Nubus info - we will trust the card's idea
333 of where its memory and registers are. */
334
335 if (nubus_get_func_dir(ndev, &dir) == -1) {
336 printk(KERN_ERR "%s: Unable to get Nubus functional"
337 " directory for slot %X!\n",
338 dev->name, ndev->board->slot);
339 continue; 434 continue;
340 }
341
342 /* Get the MAC address */
343 if ((nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent)) == -1) {
344 printk(KERN_INFO "%s: Couldn't get MAC address!\n",
345 dev->name);
346 continue;
347 } else {
348 nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
349 }
350
351 if (useresources[cardtype] == 1) {
352 nubus_rewinddir(&dir);
353 if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS, &ent) == -1) {
354 printk(KERN_ERR "%s: Memory offset resource"
355 " for slot %X not found!\n",
356 dev->name, ndev->board->slot);
357 continue;
358 }
359 nubus_get_rsrc_mem(&offset, &ent, 4);
360 dev->mem_start = dev->base_addr + offset;
361 /* yes, this is how the Apple driver does it */
362 dev->base_addr = dev->mem_start + 0x10000;
363 nubus_rewinddir(&dir);
364 if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH, &ent) == -1) {
365 printk(KERN_INFO "%s: Memory length resource"
366 " for slot %X not found"
367 ", probing\n",
368 dev->name, ndev->board->slot);
369 offset = mac8390_memsize(dev->mem_start);
370 } else {
371 nubus_get_rsrc_mem(&offset, &ent, 4);
372 }
373 dev->mem_end = dev->mem_start + offset;
374 } else {
375 switch (cardtype) {
376 case MAC8390_KINETICS:
377 case MAC8390_DAYNA: /* it's the same */
378 dev->base_addr =
379 (int)(ndev->board->slot_addr +
380 DAYNA_8390_BASE);
381 dev->mem_start =
382 (int)(ndev->board->slot_addr +
383 DAYNA_8390_MEM);
384 dev->mem_end =
385 dev->mem_start +
386 mac8390_memsize(dev->mem_start);
387 break;
388 case MAC8390_INTERLAN:
389 dev->base_addr =
390 (int)(ndev->board->slot_addr +
391 INTERLAN_8390_BASE);
392 dev->mem_start =
393 (int)(ndev->board->slot_addr +
394 INTERLAN_8390_MEM);
395 dev->mem_end =
396 dev->mem_start +
397 mac8390_memsize(dev->mem_start);
398 break;
399 case MAC8390_CABLETRON:
400 dev->base_addr =
401 (int)(ndev->board->slot_addr +
402 CABLETRON_8390_BASE);
403 dev->mem_start =
404 (int)(ndev->board->slot_addr +
405 CABLETRON_8390_MEM);
406 /* The base address is unreadable if 0x00
407 * has been written to the command register
408 * Reset the chip by writing E8390_NODMA +
409 * E8390_PAGE0 + E8390_STOP just to be
410 * sure
411 */
412 i = (void *)dev->base_addr;
413 *i = 0x21;
414 dev->mem_end =
415 dev->mem_start +
416 mac8390_memsize(dev->mem_start);
417 break;
418
419 default:
420 printk(KERN_ERR "Card type %s is"
421 " unsupported, sorry\n",
422 ndev->board->name);
423 continue;
424 }
425 }
426 435
427 /* Do the nasty 8390 stuff */ 436 /* Do the nasty 8390 stuff */
428 if (!mac8390_initdev(dev, ndev, cardtype)) 437 if (!mac8390_initdev(dev, ndev, cardtype))
@@ -458,7 +467,7 @@ int init_module(void)
458 dev_mac890[i] = dev; 467 dev_mac890[i] = dev;
459 } 468 }
460 if (!i) { 469 if (!i) {
461 printk(KERN_NOTICE "mac8390.c: No useable cards found, driver NOT installed.\n"); 470 pr_notice("No useable cards found, driver NOT installed.\n");
462 return -ENODEV; 471 return -ENODEV;
463 } 472 }
464 return 0; 473 return 0;
@@ -493,22 +502,23 @@ static const struct net_device_ops mac8390_netdev_ops = {
493#endif 502#endif
494}; 503};
495 504
496static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev, 505static int __init mac8390_initdev(struct net_device *dev,
497 enum mac8390_type type) 506 struct nubus_dev *ndev,
507 enum mac8390_type type)
498{ 508{
499 static u32 fwrd4_offsets[16]={ 509 static u32 fwrd4_offsets[16] = {
500 0, 4, 8, 12, 510 0, 4, 8, 12,
501 16, 20, 24, 28, 511 16, 20, 24, 28,
502 32, 36, 40, 44, 512 32, 36, 40, 44,
503 48, 52, 56, 60 513 48, 52, 56, 60
504 }; 514 };
505 static u32 back4_offsets[16]={ 515 static u32 back4_offsets[16] = {
506 60, 56, 52, 48, 516 60, 56, 52, 48,
507 44, 40, 36, 32, 517 44, 40, 36, 32,
508 28, 24, 20, 16, 518 28, 24, 20, 16,
509 12, 8, 4, 0 519 12, 8, 4, 0
510 }; 520 };
511 static u32 fwrd2_offsets[16]={ 521 static u32 fwrd2_offsets[16] = {
512 0, 2, 4, 6, 522 0, 2, 4, 6,
513 8, 10, 12, 14, 523 8, 10, 12, 14,
514 16, 18, 20, 22, 524 16, 18, 20, 22,
@@ -526,47 +536,47 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
526 536
527 /* Cabletron's TX/RX buffers are backwards */ 537 /* Cabletron's TX/RX buffers are backwards */
528 if (type == MAC8390_CABLETRON) { 538 if (type == MAC8390_CABLETRON) {
529 ei_status.tx_start_page = CABLETRON_TX_START_PG; 539 ei_status.tx_start_page = CABLETRON_TX_START_PG;
530 ei_status.rx_start_page = CABLETRON_RX_START_PG; 540 ei_status.rx_start_page = CABLETRON_RX_START_PG;
531 ei_status.stop_page = CABLETRON_RX_STOP_PG; 541 ei_status.stop_page = CABLETRON_RX_STOP_PG;
532 ei_status.rmem_start = dev->mem_start; 542 ei_status.rmem_start = dev->mem_start;
533 ei_status.rmem_end = dev->mem_start + CABLETRON_RX_STOP_PG*256; 543 ei_status.rmem_end = dev->mem_start + CABLETRON_RX_STOP_PG*256;
534 } else { 544 } else {
535 ei_status.tx_start_page = WD_START_PG; 545 ei_status.tx_start_page = WD_START_PG;
536 ei_status.rx_start_page = WD_START_PG + TX_PAGES; 546 ei_status.rx_start_page = WD_START_PG + TX_PAGES;
537 ei_status.stop_page = (dev->mem_end - dev->mem_start)/256; 547 ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
538 ei_status.rmem_start = dev->mem_start + TX_PAGES*256; 548 ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
539 ei_status.rmem_end = dev->mem_end; 549 ei_status.rmem_end = dev->mem_end;
540 } 550 }
541 551
542 /* Fill in model-specific information and functions */ 552 /* Fill in model-specific information and functions */
543 switch(type) { 553 switch (type) {
544 case MAC8390_FARALLON: 554 case MAC8390_FARALLON:
545 case MAC8390_APPLE: 555 case MAC8390_APPLE:
546 switch(mac8390_testio(dev->mem_start)) { 556 switch (mac8390_testio(dev->mem_start)) {
547 case ACCESS_UNKNOWN: 557 case ACCESS_UNKNOWN:
548 printk("Don't know how to access card memory!\n"); 558 pr_info("Don't know how to access card memory!\n");
549 return -ENODEV; 559 return -ENODEV;
550 break; 560 break;
551 561
552 case ACCESS_16: 562 case ACCESS_16:
553 /* 16 bit card, register map is reversed */ 563 /* 16 bit card, register map is reversed */
554 ei_status.reset_8390 = &mac8390_no_reset; 564 ei_status.reset_8390 = &mac8390_no_reset;
555 ei_status.block_input = &slow_sane_block_input; 565 ei_status.block_input = &slow_sane_block_input;
556 ei_status.block_output = &slow_sane_block_output; 566 ei_status.block_output = &slow_sane_block_output;
557 ei_status.get_8390_hdr = &slow_sane_get_8390_hdr; 567 ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
558 ei_status.reg_offset = back4_offsets; 568 ei_status.reg_offset = back4_offsets;
559 break; 569 break;
560 570
561 case ACCESS_32: 571 case ACCESS_32:
562 /* 32 bit card, register map is reversed */ 572 /* 32 bit card, register map is reversed */
563 ei_status.reset_8390 = &mac8390_no_reset; 573 ei_status.reset_8390 = &mac8390_no_reset;
564 ei_status.block_input = &sane_block_input; 574 ei_status.block_input = &sane_block_input;
565 ei_status.block_output = &sane_block_output; 575 ei_status.block_output = &sane_block_output;
566 ei_status.get_8390_hdr = &sane_get_8390_hdr; 576 ei_status.get_8390_hdr = &sane_get_8390_hdr;
567 ei_status.reg_offset = back4_offsets; 577 ei_status.reg_offset = back4_offsets;
568 access_bitmode = 1; 578 access_bitmode = 1;
569 break; 579 break;
570 } 580 }
571 break; 581 break;
572 582
@@ -608,24 +618,25 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
608 ei_status.block_input = &slow_sane_block_input; 618 ei_status.block_input = &slow_sane_block_input;
609 ei_status.block_output = &slow_sane_block_output; 619 ei_status.block_output = &slow_sane_block_output;
610 ei_status.get_8390_hdr = &slow_sane_get_8390_hdr; 620 ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
611 ei_status.reg_offset = fwrd4_offsets; 621 ei_status.reg_offset = fwrd4_offsets;
612 break; 622 break;
613 623
614 default: 624 default:
615 printk(KERN_ERR "Card type %s is unsupported, sorry\n", ndev->board->name); 625 pr_err("Card type %s is unsupported, sorry\n",
626 ndev->board->name);
616 return -ENODEV; 627 return -ENODEV;
617 } 628 }
618 629
619 __NS8390_init(dev, 0); 630 __NS8390_init(dev, 0);
620 631
621 /* Good, done, now spit out some messages */ 632 /* Good, done, now spit out some messages */
622 printk(KERN_INFO "%s: %s in slot %X (type %s)\n", 633 pr_info("%s: %s in slot %X (type %s)\n",
623 dev->name, ndev->board->name, ndev->board->slot, cardname[type]); 634 dev->name, ndev->board->name, ndev->board->slot,
624 printk(KERN_INFO 635 cardname[type]);
625 "MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n", 636 pr_info("MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n",
626 dev->dev_addr, dev->irq, 637 dev->dev_addr, dev->irq,
627 (unsigned int)(dev->mem_end - dev->mem_start) >> 10, 638 (unsigned int)(dev->mem_end - dev->mem_start) >> 10,
628 dev->mem_start, access_bitmode ? 32 : 16); 639 dev->mem_start, access_bitmode ? 32 : 16);
629 return 0; 640 return 0;
630} 641}
631 642
@@ -633,7 +644,7 @@ static int mac8390_open(struct net_device *dev)
633{ 644{
634 __ei_open(dev); 645 __ei_open(dev);
635 if (request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev)) { 646 if (request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev)) {
636 printk ("%s: unable to get IRQ %d.\n", dev->name, dev->irq); 647 pr_info("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
637 return -EAGAIN; 648 return -EAGAIN;
638 } 649 }
639 return 0; 650 return 0;
@@ -650,72 +661,71 @@ static void mac8390_no_reset(struct net_device *dev)
650{ 661{
651 ei_status.txing = 0; 662 ei_status.txing = 0;
652 if (ei_debug > 1) 663 if (ei_debug > 1)
653 printk("reset not supported\n"); 664 pr_info("reset not supported\n");
654 return; 665 return;
655} 666}
656 667
657static void interlan_reset(struct net_device *dev) 668static void interlan_reset(struct net_device *dev)
658{ 669{
659 unsigned char *target=nubus_slot_addr(IRQ2SLOT(dev->irq)); 670 unsigned char *target = nubus_slot_addr(IRQ2SLOT(dev->irq));
660 if (ei_debug > 1) 671 if (ei_debug > 1)
661 printk("Need to reset the NS8390 t=%lu...", jiffies); 672 pr_info("Need to reset the NS8390 t=%lu...", jiffies);
662 ei_status.txing = 0; 673 ei_status.txing = 0;
663 target[0xC0000] = 0; 674 target[0xC0000] = 0;
664 if (ei_debug > 1) 675 if (ei_debug > 1)
665 printk("reset complete\n"); 676 pr_cont("reset complete\n");
666 return; 677 return;
667} 678}
668 679
669/* dayna_memcpy_fromio/dayna_memcpy_toio */ 680/* dayna_memcpy_fromio/dayna_memcpy_toio */
670/* directly from daynaport.c by Alan Cox */ 681/* directly from daynaport.c by Alan Cox */
671static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from, int count) 682static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from,
683 int count)
672{ 684{
673 volatile unsigned char *ptr; 685 volatile unsigned char *ptr;
674 unsigned char *target=to; 686 unsigned char *target = to;
675 from<<=1; /* word, skip overhead */ 687 from <<= 1; /* word, skip overhead */
676 ptr=(unsigned char *)(dev->mem_start+from); 688 ptr = (unsigned char *)(dev->mem_start+from);
677 /* Leading byte? */ 689 /* Leading byte? */
678 if (from&2) { 690 if (from & 2) {
679 *target++ = ptr[-1]; 691 *target++ = ptr[-1];
680 ptr += 2; 692 ptr += 2;
681 count--; 693 count--;
682 } 694 }
683 while(count>=2) 695 while (count >= 2) {
684 {
685 *(unsigned short *)target = *(unsigned short volatile *)ptr; 696 *(unsigned short *)target = *(unsigned short volatile *)ptr;
686 ptr += 4; /* skip cruft */ 697 ptr += 4; /* skip cruft */
687 target += 2; 698 target += 2;
688 count-=2; 699 count -= 2;
689 } 700 }
690 /* Trailing byte? */ 701 /* Trailing byte? */
691 if(count) 702 if (count)
692 *target = *ptr; 703 *target = *ptr;
693} 704}
694 705
695static void dayna_memcpy_tocard(struct net_device *dev, int to, const void *from, int count) 706static void dayna_memcpy_tocard(struct net_device *dev, int to,
707 const void *from, int count)
696{ 708{
697 volatile unsigned short *ptr; 709 volatile unsigned short *ptr;
698 const unsigned char *src=from; 710 const unsigned char *src = from;
699 to<<=1; /* word, skip overhead */ 711 to <<= 1; /* word, skip overhead */
700 ptr=(unsigned short *)(dev->mem_start+to); 712 ptr = (unsigned short *)(dev->mem_start+to);
701 /* Leading byte? */ 713 /* Leading byte? */
702 if (to&2) { /* avoid a byte write (stomps on other data) */ 714 if (to & 2) { /* avoid a byte write (stomps on other data) */
703 ptr[-1] = (ptr[-1]&0xFF00)|*src++; 715 ptr[-1] = (ptr[-1]&0xFF00)|*src++;
704 ptr++; 716 ptr++;
705 count--; 717 count--;
706 } 718 }
707 while(count>=2) 719 while (count >= 2) {
708 { 720 *ptr++ = *(unsigned short *)src; /* Copy and */
709 *ptr++=*(unsigned short *)src; /* Copy and */
710 ptr++; /* skip cruft */ 721 ptr++; /* skip cruft */
711 src += 2; 722 src += 2;
712 count-=2; 723 count -= 2;
713 } 724 }
714 /* Trailing byte? */ 725 /* Trailing byte? */
715 if(count) 726 if (count) {
716 {
717 /* card doesn't like byte writes */ 727 /* card doesn't like byte writes */
718 *ptr=(*ptr&0x00FF)|(*src << 8); 728 *ptr = (*ptr & 0x00FF) | (*src << 8);
719 } 729 }
720} 730}
721 731
@@ -738,11 +748,14 @@ static void sane_block_input(struct net_device *dev, int count,
738 if (xfer_start + count > ei_status.rmem_end) { 748 if (xfer_start + count > ei_status.rmem_end) {
739 /* We must wrap the input move. */ 749 /* We must wrap the input move. */
740 int semi_count = ei_status.rmem_end - xfer_start; 750 int semi_count = ei_status.rmem_end - xfer_start;
741 memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, semi_count); 751 memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
752 semi_count);
742 count -= semi_count; 753 count -= semi_count;
743 memcpy_toio(skb->data + semi_count, (char *)ei_status.rmem_start, count); 754 memcpy_toio(skb->data + semi_count,
755 (char *)ei_status.rmem_start, count);
744 } else { 756 } else {
745 memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, count); 757 memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
758 count);
746 } 759 }
747} 760}
748 761
@@ -755,16 +768,18 @@ static void sane_block_output(struct net_device *dev, int count,
755} 768}
756 769
757/* dayna block input/output */ 770/* dayna block input/output */
758static void dayna_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) 771static void dayna_get_8390_hdr(struct net_device *dev,
772 struct e8390_pkt_hdr *hdr, int ring_page)
759{ 773{
760 unsigned long hdr_start = (ring_page - WD_START_PG)<<8; 774 unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
761 775
762 dayna_memcpy_fromcard(dev, (void *)hdr, hdr_start, 4); 776 dayna_memcpy_fromcard(dev, hdr, hdr_start, 4);
763 /* Fix endianness */ 777 /* Fix endianness */
764 hdr->count=(hdr->count&0xFF)<<8|(hdr->count>>8); 778 hdr->count = (hdr->count & 0xFF) << 8 | (hdr->count >> 8);
765} 779}
766 780
767static void dayna_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) 781static void dayna_block_input(struct net_device *dev, int count,
782 struct sk_buff *skb, int ring_offset)
768{ 783{
769 unsigned long xfer_base = ring_offset - (WD_START_PG<<8); 784 unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
770 unsigned long xfer_start = xfer_base+dev->mem_start; 785 unsigned long xfer_start = xfer_base+dev->mem_start;
@@ -772,8 +787,7 @@ static void dayna_block_input(struct net_device *dev, int count, struct sk_buff
772 /* Note the offset math is done in card memory space which is word 787 /* Note the offset math is done in card memory space which is word
773 per long onto our space. */ 788 per long onto our space. */
774 789
775 if (xfer_start + count > ei_status.rmem_end) 790 if (xfer_start + count > ei_status.rmem_end) {
776 {
777 /* We must wrap the input move. */ 791 /* We must wrap the input move. */
778 int semi_count = ei_status.rmem_end - xfer_start; 792 int semi_count = ei_status.rmem_end - xfer_start;
779 dayna_memcpy_fromcard(dev, skb->data, xfer_base, semi_count); 793 dayna_memcpy_fromcard(dev, skb->data, xfer_base, semi_count);
@@ -781,15 +795,14 @@ static void dayna_block_input(struct net_device *dev, int count, struct sk_buff
781 dayna_memcpy_fromcard(dev, skb->data + semi_count, 795 dayna_memcpy_fromcard(dev, skb->data + semi_count,
782 ei_status.rmem_start - dev->mem_start, 796 ei_status.rmem_start - dev->mem_start,
783 count); 797 count);
784 } 798 } else {
785 else
786 {
787 dayna_memcpy_fromcard(dev, skb->data, xfer_base, count); 799 dayna_memcpy_fromcard(dev, skb->data, xfer_base, count);
788 } 800 }
789} 801}
790 802
791static void dayna_block_output(struct net_device *dev, int count, const unsigned char *buf, 803static void dayna_block_output(struct net_device *dev, int count,
792 int start_page) 804 const unsigned char *buf,
805 int start_page)
793{ 806{
794 long shmem = (start_page - WD_START_PG)<<8; 807 long shmem = (start_page - WD_START_PG)<<8;
795 808
@@ -797,40 +810,39 @@ static void dayna_block_output(struct net_device *dev, int count, const unsigned
797} 810}
798 811
799/* Cabletron block I/O */ 812/* Cabletron block I/O */
800static void slow_sane_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, 813static void slow_sane_get_8390_hdr(struct net_device *dev,
801 int ring_page) 814 struct e8390_pkt_hdr *hdr,
815 int ring_page)
802{ 816{
803 unsigned long hdr_start = (ring_page - WD_START_PG)<<8; 817 unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
804 word_memcpy_fromcard((void *)hdr, (char *)dev->mem_start+hdr_start, 4); 818 word_memcpy_fromcard(hdr, (char *)dev->mem_start + hdr_start, 4);
805 /* Register endianism - fix here rather than 8390.c */ 819 /* Register endianism - fix here rather than 8390.c */
806 hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8); 820 hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8);
807} 821}
808 822
809static void slow_sane_block_input(struct net_device *dev, int count, struct sk_buff *skb, 823static void slow_sane_block_input(struct net_device *dev, int count,
810 int ring_offset) 824 struct sk_buff *skb, int ring_offset)
811{ 825{
812 unsigned long xfer_base = ring_offset - (WD_START_PG<<8); 826 unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
813 unsigned long xfer_start = xfer_base+dev->mem_start; 827 unsigned long xfer_start = xfer_base+dev->mem_start;
814 828
815 if (xfer_start + count > ei_status.rmem_end) 829 if (xfer_start + count > ei_status.rmem_end) {
816 {
817 /* We must wrap the input move. */ 830 /* We must wrap the input move. */
818 int semi_count = ei_status.rmem_end - xfer_start; 831 int semi_count = ei_status.rmem_end - xfer_start;
819 word_memcpy_fromcard(skb->data, (char *)dev->mem_start + 832 word_memcpy_fromcard(skb->data,
820 xfer_base, semi_count); 833 (char *)dev->mem_start + xfer_base,
834 semi_count);
821 count -= semi_count; 835 count -= semi_count;
822 word_memcpy_fromcard(skb->data + semi_count, 836 word_memcpy_fromcard(skb->data + semi_count,
823 (char *)ei_status.rmem_start, count); 837 (char *)ei_status.rmem_start, count);
824 } 838 } else {
825 else 839 word_memcpy_fromcard(skb->data,
826 { 840 (char *)dev->mem_start + xfer_base, count);
827 word_memcpy_fromcard(skb->data, (char *)dev->mem_start +
828 xfer_base, count);
829 } 841 }
830} 842}
831 843
832static void slow_sane_block_output(struct net_device *dev, int count, const unsigned char *buf, 844static void slow_sane_block_output(struct net_device *dev, int count,
833 int start_page) 845 const unsigned char *buf, int start_page)
834{ 846{
835 long shmem = (start_page - WD_START_PG)<<8; 847 long shmem = (start_page - WD_START_PG)<<8;
836 848
@@ -843,10 +855,10 @@ static void word_memcpy_tocard(void *tp, const void *fp, int count)
843 const unsigned short *from = fp; 855 const unsigned short *from = fp;
844 856
845 count++; 857 count++;
846 count/=2; 858 count /= 2;
847 859
848 while(count--) 860 while (count--)
849 *to++=*from++; 861 *to++ = *from++;
850} 862}
851 863
852static void word_memcpy_fromcard(void *tp, const void *fp, int count) 864static void word_memcpy_fromcard(void *tp, const void *fp, int count)
@@ -855,10 +867,10 @@ static void word_memcpy_fromcard(void *tp, const void *fp, int count)
855 const volatile unsigned short *from = fp; 867 const volatile unsigned short *from = fp;
856 868
857 count++; 869 count++;
858 count/=2; 870 count /= 2;
859 871
860 while(count--) 872 while (count--)
861 *to++=*from++; 873 *to++ = *from++;
862} 874}
863 875
864 876
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c
index 23b633e2ac42..c292a608f9a9 100644
--- a/drivers/net/mac89x0.c
+++ b/drivers/net/mac89x0.c
@@ -568,9 +568,7 @@ static void set_multicast_list(struct net_device *dev)
568 if(dev->flags&IFF_PROMISC) 568 if(dev->flags&IFF_PROMISC)
569 { 569 {
570 lp->rx_mode = RX_ALL_ACCEPT; 570 lp->rx_mode = RX_ALL_ACCEPT;
571 } 571 } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
572 else if((dev->flags&IFF_ALLMULTI)||dev->mc_list)
573 {
574 /* The multicast-accept list is initialized to accept-all, and we 572 /* The multicast-accept list is initialized to accept-all, and we
575 rely on higher-level filtering for now. */ 573 rely on higher-level filtering for now. */
576 lp->rx_mode = RX_MULTCAST_ACCEPT; 574 lp->rx_mode = RX_MULTCAST_ACCEPT;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 1d0d4d9ab623..c8a18a6203c8 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -189,18 +189,11 @@ static void macb_handle_link_change(struct net_device *dev)
189static int macb_mii_probe(struct net_device *dev) 189static int macb_mii_probe(struct net_device *dev)
190{ 190{
191 struct macb *bp = netdev_priv(dev); 191 struct macb *bp = netdev_priv(dev);
192 struct phy_device *phydev = NULL; 192 struct phy_device *phydev;
193 struct eth_platform_data *pdata; 193 struct eth_platform_data *pdata;
194 int phy_addr; 194 int ret;
195
196 /* find the first phy */
197 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
198 if (bp->mii_bus->phy_map[phy_addr]) {
199 phydev = bp->mii_bus->phy_map[phy_addr];
200 break;
201 }
202 }
203 195
196 phydev = phy_find_first(bp->mii_bus);
204 if (!phydev) { 197 if (!phydev) {
205 printk (KERN_ERR "%s: no PHY found\n", dev->name); 198 printk (KERN_ERR "%s: no PHY found\n", dev->name);
206 return -1; 199 return -1;
@@ -210,17 +203,13 @@ static int macb_mii_probe(struct net_device *dev)
210 /* TODO : add pin_irq */ 203 /* TODO : add pin_irq */
211 204
212 /* attach the mac to the phy */ 205 /* attach the mac to the phy */
213 if (pdata && pdata->is_rmii) { 206 ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0,
214 phydev = phy_connect(dev, dev_name(&phydev->dev), 207 pdata && pdata->is_rmii ?
215 &macb_handle_link_change, 0, PHY_INTERFACE_MODE_RMII); 208 PHY_INTERFACE_MODE_RMII :
216 } else { 209 PHY_INTERFACE_MODE_MII);
217 phydev = phy_connect(dev, dev_name(&phydev->dev), 210 if (ret) {
218 &macb_handle_link_change, 0, PHY_INTERFACE_MODE_MII);
219 }
220
221 if (IS_ERR(phydev)) {
222 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 211 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
223 return PTR_ERR(phydev); 212 return ret;
224 } 213 }
225 214
226 /* mask with MAC supported features */ 215 /* mask with MAC supported features */
@@ -895,15 +884,12 @@ static void macb_sethashtable(struct net_device *dev)
895{ 884{
896 struct dev_mc_list *curr; 885 struct dev_mc_list *curr;
897 unsigned long mc_filter[2]; 886 unsigned long mc_filter[2];
898 unsigned int i, bitnr; 887 unsigned int bitnr;
899 struct macb *bp = netdev_priv(dev); 888 struct macb *bp = netdev_priv(dev);
900 889
901 mc_filter[0] = mc_filter[1] = 0; 890 mc_filter[0] = mc_filter[1] = 0;
902 891
903 curr = dev->mc_list; 892 netdev_for_each_mc_addr(curr, dev) {
904 for (i = 0; i < dev->mc_count; i++, curr = curr->next) {
905 if (!curr) break; /* unexpected end of list */
906
907 bitnr = hash_get_index(curr->dmi_addr); 893 bitnr = hash_get_index(curr->dmi_addr);
908 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); 894 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
909 } 895 }
@@ -934,7 +920,7 @@ static void macb_set_rx_mode(struct net_device *dev)
934 macb_writel(bp, HRB, -1); 920 macb_writel(bp, HRB, -1);
935 macb_writel(bp, HRT, -1); 921 macb_writel(bp, HRT, -1);
936 cfg |= MACB_BIT(NCFGR_MTI); 922 cfg |= MACB_BIT(NCFGR_MTI);
937 } else if (dev->mc_count > 0) { 923 } else if (!netdev_mc_empty(dev)) {
938 /* Enable specific multicasts */ 924 /* Enable specific multicasts */
939 macb_sethashtable(dev); 925 macb_sethashtable(dev);
940 cfg |= MACB_BIT(NCFGR_MTI); 926 cfg |= MACB_BIT(NCFGR_MTI);
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index 43aea91e3369..ab5f0bf6d1ae 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -588,7 +588,7 @@ static void mace_set_multicast(struct net_device *dev)
588{ 588{
589 struct mace_data *mp = netdev_priv(dev); 589 struct mace_data *mp = netdev_priv(dev);
590 volatile struct mace __iomem *mb = mp->mace; 590 volatile struct mace __iomem *mb = mp->mace;
591 int i, j; 591 int i;
592 u32 crc; 592 u32 crc;
593 unsigned long flags; 593 unsigned long flags;
594 594
@@ -598,7 +598,7 @@ static void mace_set_multicast(struct net_device *dev)
598 mp->maccc |= PROM; 598 mp->maccc |= PROM;
599 } else { 599 } else {
600 unsigned char multicast_filter[8]; 600 unsigned char multicast_filter[8];
601 struct dev_mc_list *dmi = dev->mc_list; 601 struct dev_mc_list *dmi;
602 602
603 if (dev->flags & IFF_ALLMULTI) { 603 if (dev->flags & IFF_ALLMULTI) {
604 for (i = 0; i < 8; i++) 604 for (i = 0; i < 8; i++)
@@ -606,11 +606,10 @@ static void mace_set_multicast(struct net_device *dev)
606 } else { 606 } else {
607 for (i = 0; i < 8; i++) 607 for (i = 0; i < 8; i++)
608 multicast_filter[i] = 0; 608 multicast_filter[i] = 0;
609 for (i = 0; i < dev->mc_count; i++) { 609 netdev_for_each_mc_addr(dmi, dev) {
610 crc = ether_crc_le(6, dmi->dmi_addr); 610 crc = ether_crc_le(6, dmi->dmi_addr);
611 j = crc >> 26; /* bit number in multicast_filter */ 611 i = crc >> 26; /* bit number in multicast_filter */
612 multicast_filter[j >> 3] |= 1 << (j & 7); 612 multicast_filter[i >> 3] |= 1 << (i & 7);
613 dmi = dmi->next;
614 } 613 }
615 } 614 }
616#if 0 615#if 0
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index 79408c377875..13ba8f4afb7e 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -495,7 +495,7 @@ static void mace_set_multicast(struct net_device *dev)
495{ 495{
496 struct mace_data *mp = netdev_priv(dev); 496 struct mace_data *mp = netdev_priv(dev);
497 volatile struct mace *mb = mp->mace; 497 volatile struct mace *mb = mp->mace;
498 int i, j; 498 int i;
499 u32 crc; 499 u32 crc;
500 u8 maccc; 500 u8 maccc;
501 unsigned long flags; 501 unsigned long flags;
@@ -508,7 +508,7 @@ static void mace_set_multicast(struct net_device *dev)
508 mb->maccc |= PROM; 508 mb->maccc |= PROM;
509 } else { 509 } else {
510 unsigned char multicast_filter[8]; 510 unsigned char multicast_filter[8];
511 struct dev_mc_list *dmi = dev->mc_list; 511 struct dev_mc_list *dmi;
512 512
513 if (dev->flags & IFF_ALLMULTI) { 513 if (dev->flags & IFF_ALLMULTI) {
514 for (i = 0; i < 8; i++) { 514 for (i = 0; i < 8; i++) {
@@ -517,11 +517,11 @@ static void mace_set_multicast(struct net_device *dev)
517 } else { 517 } else {
518 for (i = 0; i < 8; i++) 518 for (i = 0; i < 8; i++)
519 multicast_filter[i] = 0; 519 multicast_filter[i] = 0;
520 for (i = 0; i < dev->mc_count; i++) { 520 netdev_for_each_mc_addr(dmi, dev) {
521 crc = ether_crc_le(6, dmi->dmi_addr); 521 crc = ether_crc_le(6, dmi->dmi_addr);
522 j = crc >> 26; /* bit number in multicast_filter */ 522 /* bit number in multicast_filter */
523 multicast_filter[j >> 3] |= 1 << (j & 7); 523 i = crc >> 26;
524 dmi = dmi->next; 524 multicast_filter[i >> 3] |= 1 << (i & 7);
525 } 525 }
526 } 526 }
527 527
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 21a9c9ab4b34..40faa368b07a 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -39,31 +39,6 @@ struct macvlan_port {
39 struct list_head vlans; 39 struct list_head vlans;
40}; 40};
41 41
42/**
43 * struct macvlan_rx_stats - MACVLAN percpu rx stats
44 * @rx_packets: number of received packets
45 * @rx_bytes: number of received bytes
46 * @multicast: number of received multicast packets
47 * @rx_errors: number of errors
48 */
49struct macvlan_rx_stats {
50 unsigned long rx_packets;
51 unsigned long rx_bytes;
52 unsigned long multicast;
53 unsigned long rx_errors;
54};
55
56struct macvlan_dev {
57 struct net_device *dev;
58 struct list_head list;
59 struct hlist_node hlist;
60 struct macvlan_port *port;
61 struct net_device *lowerdev;
62 struct macvlan_rx_stats *rx_stats;
63 enum macvlan_mode mode;
64};
65
66
67static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port, 42static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
68 const unsigned char *addr) 43 const unsigned char *addr)
69{ 44{
@@ -118,31 +93,17 @@ static int macvlan_addr_busy(const struct macvlan_port *port,
118 return 0; 93 return 0;
119} 94}
120 95
121static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
122 unsigned int len, bool success,
123 bool multicast)
124{
125 struct macvlan_rx_stats *rx_stats;
126
127 rx_stats = per_cpu_ptr(vlan->rx_stats, smp_processor_id());
128 if (likely(success)) {
129 rx_stats->rx_packets++;;
130 rx_stats->rx_bytes += len;
131 if (multicast)
132 rx_stats->multicast++;
133 } else {
134 rx_stats->rx_errors++;
135 }
136}
137 96
138static int macvlan_broadcast_one(struct sk_buff *skb, struct net_device *dev, 97static int macvlan_broadcast_one(struct sk_buff *skb,
98 const struct macvlan_dev *vlan,
139 const struct ethhdr *eth, bool local) 99 const struct ethhdr *eth, bool local)
140{ 100{
101 struct net_device *dev = vlan->dev;
141 if (!skb) 102 if (!skb)
142 return NET_RX_DROP; 103 return NET_RX_DROP;
143 104
144 if (local) 105 if (local)
145 return dev_forward_skb(dev, skb); 106 return vlan->forward(dev, skb);
146 107
147 skb->dev = dev; 108 skb->dev = dev;
148 if (!compare_ether_addr_64bits(eth->h_dest, 109 if (!compare_ether_addr_64bits(eth->h_dest,
@@ -151,7 +112,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb, struct net_device *dev,
151 else 112 else
152 skb->pkt_type = PACKET_MULTICAST; 113 skb->pkt_type = PACKET_MULTICAST;
153 114
154 return netif_rx(skb); 115 return vlan->receive(skb);
155} 116}
156 117
157static void macvlan_broadcast(struct sk_buff *skb, 118static void macvlan_broadcast(struct sk_buff *skb,
@@ -175,7 +136,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
175 continue; 136 continue;
176 137
177 nskb = skb_clone(skb, GFP_ATOMIC); 138 nskb = skb_clone(skb, GFP_ATOMIC);
178 err = macvlan_broadcast_one(nskb, vlan->dev, eth, 139 err = macvlan_broadcast_one(nskb, vlan, eth,
179 mode == MACVLAN_MODE_BRIDGE); 140 mode == MACVLAN_MODE_BRIDGE);
180 macvlan_count_rx(vlan, skb->len + ETH_HLEN, 141 macvlan_count_rx(vlan, skb->len + ETH_HLEN,
181 err == NET_RX_SUCCESS, 1); 142 err == NET_RX_SUCCESS, 1);
@@ -238,7 +199,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
238 skb->dev = dev; 199 skb->dev = dev;
239 skb->pkt_type = PACKET_HOST; 200 skb->pkt_type = PACKET_HOST;
240 201
241 netif_rx(skb); 202 vlan->receive(skb);
242 return NULL; 203 return NULL;
243} 204}
244 205
@@ -260,7 +221,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
260 dest = macvlan_hash_lookup(port, eth->h_dest); 221 dest = macvlan_hash_lookup(port, eth->h_dest);
261 if (dest && dest->mode == MACVLAN_MODE_BRIDGE) { 222 if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
262 unsigned int length = skb->len + ETH_HLEN; 223 unsigned int length = skb->len + ETH_HLEN;
263 int ret = dev_forward_skb(dest->dev, skb); 224 int ret = dest->forward(dest->dev, skb);
264 macvlan_count_rx(dest, length, 225 macvlan_count_rx(dest, length,
265 ret == NET_RX_SUCCESS, 0); 226 ret == NET_RX_SUCCESS, 0);
266 227
@@ -269,12 +230,12 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
269 } 230 }
270 231
271xmit_world: 232xmit_world:
272 skb->dev = vlan->lowerdev; 233 skb_set_dev(skb, vlan->lowerdev);
273 return dev_queue_xmit(skb); 234 return dev_queue_xmit(skb);
274} 235}
275 236
276static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, 237netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
277 struct net_device *dev) 238 struct net_device *dev)
278{ 239{
279 int i = skb_get_queue_mapping(skb); 240 int i = skb_get_queue_mapping(skb);
280 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 241 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
@@ -290,6 +251,7 @@ static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
290 251
291 return ret; 252 return ret;
292} 253}
254EXPORT_SYMBOL_GPL(macvlan_start_xmit);
293 255
294static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev, 256static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
295 unsigned short type, const void *daddr, 257 unsigned short type, const void *daddr,
@@ -418,7 +380,7 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
418#define MACVLAN_FEATURES \ 380#define MACVLAN_FEATURES \
419 (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ 381 (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
420 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ 382 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
421 NETIF_F_TSO_ECN | NETIF_F_TSO6) 383 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO)
422 384
423#define MACVLAN_STATE_MASK \ 385#define MACVLAN_STATE_MASK \
424 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) 386 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
@@ -623,8 +585,11 @@ static int macvlan_get_tx_queues(struct net *net,
623 return 0; 585 return 0;
624} 586}
625 587
626static int macvlan_newlink(struct net *src_net, struct net_device *dev, 588int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
627 struct nlattr *tb[], struct nlattr *data[]) 589 struct nlattr *tb[], struct nlattr *data[],
590 int (*receive)(struct sk_buff *skb),
591 int (*forward)(struct net_device *dev,
592 struct sk_buff *skb))
628{ 593{
629 struct macvlan_dev *vlan = netdev_priv(dev); 594 struct macvlan_dev *vlan = netdev_priv(dev);
630 struct macvlan_port *port; 595 struct macvlan_port *port;
@@ -664,6 +629,8 @@ static int macvlan_newlink(struct net *src_net, struct net_device *dev,
664 vlan->lowerdev = lowerdev; 629 vlan->lowerdev = lowerdev;
665 vlan->dev = dev; 630 vlan->dev = dev;
666 vlan->port = port; 631 vlan->port = port;
632 vlan->receive = receive;
633 vlan->forward = forward;
667 634
668 vlan->mode = MACVLAN_MODE_VEPA; 635 vlan->mode = MACVLAN_MODE_VEPA;
669 if (data && data[IFLA_MACVLAN_MODE]) 636 if (data && data[IFLA_MACVLAN_MODE])
@@ -677,8 +644,17 @@ static int macvlan_newlink(struct net *src_net, struct net_device *dev,
677 netif_stacked_transfer_operstate(lowerdev, dev); 644 netif_stacked_transfer_operstate(lowerdev, dev);
678 return 0; 645 return 0;
679} 646}
647EXPORT_SYMBOL_GPL(macvlan_common_newlink);
680 648
681static void macvlan_dellink(struct net_device *dev, struct list_head *head) 649static int macvlan_newlink(struct net *src_net, struct net_device *dev,
650 struct nlattr *tb[], struct nlattr *data[])
651{
652 return macvlan_common_newlink(src_net, dev, tb, data,
653 netif_rx,
654 dev_forward_skb);
655}
656
657void macvlan_dellink(struct net_device *dev, struct list_head *head)
682{ 658{
683 struct macvlan_dev *vlan = netdev_priv(dev); 659 struct macvlan_dev *vlan = netdev_priv(dev);
684 struct macvlan_port *port = vlan->port; 660 struct macvlan_port *port = vlan->port;
@@ -689,6 +665,7 @@ static void macvlan_dellink(struct net_device *dev, struct list_head *head)
689 if (list_empty(&port->vlans)) 665 if (list_empty(&port->vlans))
690 macvlan_port_destroy(port->dev); 666 macvlan_port_destroy(port->dev);
691} 667}
668EXPORT_SYMBOL_GPL(macvlan_dellink);
692 669
693static int macvlan_changelink(struct net_device *dev, 670static int macvlan_changelink(struct net_device *dev,
694 struct nlattr *tb[], struct nlattr *data[]) 671 struct nlattr *tb[], struct nlattr *data[])
@@ -720,19 +697,27 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
720 [IFLA_MACVLAN_MODE] = { .type = NLA_U32 }, 697 [IFLA_MACVLAN_MODE] = { .type = NLA_U32 },
721}; 698};
722 699
723static struct rtnl_link_ops macvlan_link_ops __read_mostly = { 700int macvlan_link_register(struct rtnl_link_ops *ops)
701{
702 /* common fields */
703 ops->priv_size = sizeof(struct macvlan_dev);
704 ops->get_tx_queues = macvlan_get_tx_queues;
705 ops->setup = macvlan_setup;
706 ops->validate = macvlan_validate;
707 ops->maxtype = IFLA_MACVLAN_MAX;
708 ops->policy = macvlan_policy;
709 ops->changelink = macvlan_changelink;
710 ops->get_size = macvlan_get_size;
711 ops->fill_info = macvlan_fill_info;
712
713 return rtnl_link_register(ops);
714};
715EXPORT_SYMBOL_GPL(macvlan_link_register);
716
717static struct rtnl_link_ops macvlan_link_ops = {
724 .kind = "macvlan", 718 .kind = "macvlan",
725 .priv_size = sizeof(struct macvlan_dev),
726 .get_tx_queues = macvlan_get_tx_queues,
727 .setup = macvlan_setup,
728 .validate = macvlan_validate,
729 .newlink = macvlan_newlink, 719 .newlink = macvlan_newlink,
730 .dellink = macvlan_dellink, 720 .dellink = macvlan_dellink,
731 .maxtype = IFLA_MACVLAN_MAX,
732 .policy = macvlan_policy,
733 .changelink = macvlan_changelink,
734 .get_size = macvlan_get_size,
735 .fill_info = macvlan_fill_info,
736}; 721};
737 722
738static int macvlan_device_event(struct notifier_block *unused, 723static int macvlan_device_event(struct notifier_block *unused,
@@ -761,7 +746,7 @@ static int macvlan_device_event(struct notifier_block *unused,
761 break; 746 break;
762 case NETDEV_UNREGISTER: 747 case NETDEV_UNREGISTER:
763 list_for_each_entry_safe(vlan, next, &port->vlans, list) 748 list_for_each_entry_safe(vlan, next, &port->vlans, list)
764 macvlan_dellink(vlan->dev, NULL); 749 vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL);
765 break; 750 break;
766 } 751 }
767 return NOTIFY_DONE; 752 return NOTIFY_DONE;
@@ -778,7 +763,7 @@ static int __init macvlan_init_module(void)
778 register_netdevice_notifier(&macvlan_notifier_block); 763 register_netdevice_notifier(&macvlan_notifier_block);
779 macvlan_handle_frame_hook = macvlan_handle_frame; 764 macvlan_handle_frame_hook = macvlan_handle_frame;
780 765
781 err = rtnl_link_register(&macvlan_link_ops); 766 err = macvlan_link_register(&macvlan_link_ops);
782 if (err < 0) 767 if (err < 0)
783 goto err1; 768 goto err1;
784 return 0; 769 return 0;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
new file mode 100644
index 000000000000..55ceae09738e
--- /dev/null
+++ b/drivers/net/macvtap.c
@@ -0,0 +1,803 @@
1#include <linux/etherdevice.h>
2#include <linux/if_macvlan.h>
3#include <linux/interrupt.h>
4#include <linux/nsproxy.h>
5#include <linux/compat.h>
6#include <linux/if_tun.h>
7#include <linux/module.h>
8#include <linux/skbuff.h>
9#include <linux/cache.h>
10#include <linux/sched.h>
11#include <linux/types.h>
12#include <linux/init.h>
13#include <linux/wait.h>
14#include <linux/cdev.h>
15#include <linux/fs.h>
16
17#include <net/net_namespace.h>
18#include <net/rtnetlink.h>
19#include <net/sock.h>
20#include <linux/virtio_net.h>
21
22/*
23 * A macvtap queue is the central object of this driver, it connects
24 * an open character device to a macvlan interface. There can be
25 * multiple queues on one interface, which map back to queues
26 * implemented in hardware on the underlying device.
27 *
28 * macvtap_proto is used to allocate queues through the sock allocation
29 * mechanism.
30 *
31 * TODO: multiqueue support is currently not implemented, even though
32 * macvtap is basically prepared for that. We will need to add this
33 * here as well as in virtio-net and qemu to get line rate on 10gbit
34 * adapters from a guest.
35 */
36struct macvtap_queue {
37 struct sock sk;
38 struct socket sock;
39 struct macvlan_dev *vlan;
40 struct file *file;
41 unsigned int flags;
42};
43
44static struct proto macvtap_proto = {
45 .name = "macvtap",
46 .owner = THIS_MODULE,
47 .obj_size = sizeof (struct macvtap_queue),
48};
49
50/*
51 * Minor number matches netdev->ifindex, so need a potentially
52 * large value. This also makes it possible to split the
53 * tap functionality out again in the future by offering it
54 * from other drivers besides macvtap. As long as every device
55 * only has one tap, the interface numbers assure that the
56 * device nodes are unique.
57 */
58static unsigned int macvtap_major;
59#define MACVTAP_NUM_DEVS 65536
60static struct class *macvtap_class;
61static struct cdev macvtap_cdev;
62
63static const struct proto_ops macvtap_socket_ops;
64
65/*
66 * RCU usage:
67 * The macvtap_queue and the macvlan_dev are loosely coupled, the
68 * pointers from one to the other can only be read while rcu_read_lock
69 * or macvtap_lock is held.
70 *
71 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
72 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
73 * q->vlan becomes inaccessible. When the files gets closed,
74 * macvtap_get_queue() fails.
75 *
76 * There may still be references to the struct sock inside of the
77 * queue from outbound SKBs, but these never reference back to the
78 * file or the dev. The data structure is freed through __sk_free
79 * when both our references and any pending SKBs are gone.
80 */
81static DEFINE_SPINLOCK(macvtap_lock);
82
83/*
84 * Choose the next free queue, for now there is only one
85 */
86static int macvtap_set_queue(struct net_device *dev, struct file *file,
87 struct macvtap_queue *q)
88{
89 struct macvlan_dev *vlan = netdev_priv(dev);
90 int err = -EBUSY;
91
92 spin_lock(&macvtap_lock);
93 if (rcu_dereference(vlan->tap))
94 goto out;
95
96 err = 0;
97 rcu_assign_pointer(q->vlan, vlan);
98 rcu_assign_pointer(vlan->tap, q);
99 sock_hold(&q->sk);
100
101 q->file = file;
102 file->private_data = q;
103
104out:
105 spin_unlock(&macvtap_lock);
106 return err;
107}
108
109/*
110 * The file owning the queue got closed, give up both
111 * the reference that the files holds as well as the
112 * one from the macvlan_dev if that still exists.
113 *
114 * Using the spinlock makes sure that we don't get
115 * to the queue again after destroying it.
116 */
117static void macvtap_put_queue(struct macvtap_queue *q)
118{
119 struct macvlan_dev *vlan;
120
121 spin_lock(&macvtap_lock);
122 vlan = rcu_dereference(q->vlan);
123 if (vlan) {
124 rcu_assign_pointer(vlan->tap, NULL);
125 rcu_assign_pointer(q->vlan, NULL);
126 sock_put(&q->sk);
127 }
128
129 spin_unlock(&macvtap_lock);
130
131 synchronize_rcu();
132 sock_put(&q->sk);
133}
134
135/*
136 * Since we only support one queue, just dereference the pointer.
137 */
138static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
139 struct sk_buff *skb)
140{
141 struct macvlan_dev *vlan = netdev_priv(dev);
142
143 return rcu_dereference(vlan->tap);
144}
145
146/*
147 * The net_device is going away, give up the reference
148 * that it holds on the queue (all the queues one day)
149 * and safely set the pointer from the queues to NULL.
150 */
151static void macvtap_del_queues(struct net_device *dev)
152{
153 struct macvlan_dev *vlan = netdev_priv(dev);
154 struct macvtap_queue *q;
155
156 spin_lock(&macvtap_lock);
157 q = rcu_dereference(vlan->tap);
158 if (!q) {
159 spin_unlock(&macvtap_lock);
160 return;
161 }
162
163 rcu_assign_pointer(vlan->tap, NULL);
164 rcu_assign_pointer(q->vlan, NULL);
165 spin_unlock(&macvtap_lock);
166
167 synchronize_rcu();
168 sock_put(&q->sk);
169}
170
171/*
172 * Forward happens for data that gets sent from one macvlan
173 * endpoint to another one in bridge mode. We just take
174 * the skb and put it into the receive queue.
175 */
176static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
177{
178 struct macvtap_queue *q = macvtap_get_queue(dev, skb);
179 if (!q)
180 return -ENOLINK;
181
182 skb_queue_tail(&q->sk.sk_receive_queue, skb);
183 wake_up_interruptible_poll(q->sk.sk_sleep, POLLIN | POLLRDNORM | POLLRDBAND);
184 return 0;
185}
186
187/*
188 * Receive is for data from the external interface (lowerdev),
189 * in case of macvtap, we can treat that the same way as
190 * forward, which macvlan cannot.
191 */
192static int macvtap_receive(struct sk_buff *skb)
193{
194 skb_push(skb, ETH_HLEN);
195 return macvtap_forward(skb->dev, skb);
196}
197
198static int macvtap_newlink(struct net *src_net,
199 struct net_device *dev,
200 struct nlattr *tb[],
201 struct nlattr *data[])
202{
203 struct device *classdev;
204 dev_t devt;
205 int err;
206
207 err = macvlan_common_newlink(src_net, dev, tb, data,
208 macvtap_receive, macvtap_forward);
209 if (err)
210 goto out;
211
212 devt = MKDEV(MAJOR(macvtap_major), dev->ifindex);
213
214 classdev = device_create(macvtap_class, &dev->dev, devt,
215 dev, "tap%d", dev->ifindex);
216 if (IS_ERR(classdev)) {
217 err = PTR_ERR(classdev);
218 macvtap_del_queues(dev);
219 }
220
221out:
222 return err;
223}
224
225static void macvtap_dellink(struct net_device *dev,
226 struct list_head *head)
227{
228 device_destroy(macvtap_class,
229 MKDEV(MAJOR(macvtap_major), dev->ifindex));
230
231 macvtap_del_queues(dev);
232 macvlan_dellink(dev, head);
233}
234
235static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
236 .kind = "macvtap",
237 .newlink = macvtap_newlink,
238 .dellink = macvtap_dellink,
239};
240
241
242static void macvtap_sock_write_space(struct sock *sk)
243{
244 if (!sock_writeable(sk) ||
245 !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
246 return;
247
248 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
249 wake_up_interruptible_poll(sk->sk_sleep, POLLOUT | POLLWRNORM | POLLWRBAND);
250}
251
252static int macvtap_open(struct inode *inode, struct file *file)
253{
254 struct net *net = current->nsproxy->net_ns;
255 struct net_device *dev = dev_get_by_index(net, iminor(inode));
256 struct macvtap_queue *q;
257 int err;
258
259 err = -ENODEV;
260 if (!dev)
261 goto out;
262
263 /* check if this is a macvtap device */
264 err = -EINVAL;
265 if (dev->rtnl_link_ops != &macvtap_link_ops)
266 goto out;
267
268 err = -ENOMEM;
269 q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
270 &macvtap_proto);
271 if (!q)
272 goto out;
273
274 init_waitqueue_head(&q->sock.wait);
275 q->sock.type = SOCK_RAW;
276 q->sock.state = SS_CONNECTED;
277 q->sock.file = file;
278 q->sock.ops = &macvtap_socket_ops;
279 sock_init_data(&q->sock, &q->sk);
280 q->sk.sk_write_space = macvtap_sock_write_space;
281 q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
282
283 err = macvtap_set_queue(dev, file, q);
284 if (err)
285 sock_put(&q->sk);
286
287out:
288 if (dev)
289 dev_put(dev);
290
291 return err;
292}
293
294static int macvtap_release(struct inode *inode, struct file *file)
295{
296 struct macvtap_queue *q = file->private_data;
297 macvtap_put_queue(q);
298 return 0;
299}
300
301static unsigned int macvtap_poll(struct file *file, poll_table * wait)
302{
303 struct macvtap_queue *q = file->private_data;
304 unsigned int mask = POLLERR;
305
306 if (!q)
307 goto out;
308
309 mask = 0;
310 poll_wait(file, &q->sock.wait, wait);
311
312 if (!skb_queue_empty(&q->sk.sk_receive_queue))
313 mask |= POLLIN | POLLRDNORM;
314
315 if (sock_writeable(&q->sk) ||
316 (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
317 sock_writeable(&q->sk)))
318 mask |= POLLOUT | POLLWRNORM;
319
320out:
321 return mask;
322}
323
324static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
325 size_t len, size_t linear,
326 int noblock, int *err)
327{
328 struct sk_buff *skb;
329
330 /* Under a page? Don't bother with paged skb. */
331 if (prepad + len < PAGE_SIZE || !linear)
332 linear = len;
333
334 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
335 err);
336 if (!skb)
337 return NULL;
338
339 skb_reserve(skb, prepad);
340 skb_put(skb, linear);
341 skb->data_len = len - linear;
342 skb->len += len - linear;
343
344 return skb;
345}
346
347/*
348 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
349 * be shared with the tun/tap driver.
350 */
351static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
352 struct virtio_net_hdr *vnet_hdr)
353{
354 unsigned short gso_type = 0;
355 if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
356 switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
357 case VIRTIO_NET_HDR_GSO_TCPV4:
358 gso_type = SKB_GSO_TCPV4;
359 break;
360 case VIRTIO_NET_HDR_GSO_TCPV6:
361 gso_type = SKB_GSO_TCPV6;
362 break;
363 case VIRTIO_NET_HDR_GSO_UDP:
364 gso_type = SKB_GSO_UDP;
365 break;
366 default:
367 return -EINVAL;
368 }
369
370 if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
371 gso_type |= SKB_GSO_TCP_ECN;
372
373 if (vnet_hdr->gso_size == 0)
374 return -EINVAL;
375 }
376
377 if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
378 if (!skb_partial_csum_set(skb, vnet_hdr->csum_start,
379 vnet_hdr->csum_offset))
380 return -EINVAL;
381 }
382
383 if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
384 skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
385 skb_shinfo(skb)->gso_type = gso_type;
386
387 /* Header must be checked, and gso_segs computed. */
388 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
389 skb_shinfo(skb)->gso_segs = 0;
390 }
391 return 0;
392}
393
394static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
395 struct virtio_net_hdr *vnet_hdr)
396{
397 memset(vnet_hdr, 0, sizeof(*vnet_hdr));
398
399 if (skb_is_gso(skb)) {
400 struct skb_shared_info *sinfo = skb_shinfo(skb);
401
402 /* This is a hint as to how much should be linear. */
403 vnet_hdr->hdr_len = skb_headlen(skb);
404 vnet_hdr->gso_size = sinfo->gso_size;
405 if (sinfo->gso_type & SKB_GSO_TCPV4)
406 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
407 else if (sinfo->gso_type & SKB_GSO_TCPV6)
408 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
409 else if (sinfo->gso_type & SKB_GSO_UDP)
410 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
411 else
412 BUG();
413 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
414 vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
415 } else
416 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
417
418 if (skb->ip_summed == CHECKSUM_PARTIAL) {
419 vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
420 vnet_hdr->csum_start = skb->csum_start -
421 skb_headroom(skb);
422 vnet_hdr->csum_offset = skb->csum_offset;
423 } /* else everything is zero */
424
425 return 0;
426}
427
428
429/* Get packet from user space buffer */
430static ssize_t macvtap_get_user(struct macvtap_queue *q,
431 const struct iovec *iv, size_t count,
432 int noblock)
433{
434 struct sk_buff *skb;
435 struct macvlan_dev *vlan;
436 size_t len = count;
437 int err;
438 struct virtio_net_hdr vnet_hdr = { 0 };
439 int vnet_hdr_len = 0;
440
441 if (q->flags & IFF_VNET_HDR) {
442 vnet_hdr_len = sizeof(vnet_hdr);
443
444 err = -EINVAL;
445 if ((len -= vnet_hdr_len) < 0)
446 goto err;
447
448 err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
449 vnet_hdr_len);
450 if (err < 0)
451 goto err;
452 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
453 vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
454 vnet_hdr.hdr_len)
455 vnet_hdr.hdr_len = vnet_hdr.csum_start +
456 vnet_hdr.csum_offset + 2;
457 err = -EINVAL;
458 if (vnet_hdr.hdr_len > len)
459 goto err;
460 }
461
462 err = -EINVAL;
463 if (unlikely(len < ETH_HLEN))
464 goto err;
465
466 skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, len, vnet_hdr.hdr_len,
467 noblock, &err);
468 if (!skb)
469 goto err;
470
471 err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len, len);
472 if (err)
473 goto err_kfree;
474
475 skb_set_network_header(skb, ETH_HLEN);
476 skb_reset_mac_header(skb);
477 skb->protocol = eth_hdr(skb)->h_proto;
478
479 if (vnet_hdr_len) {
480 err = macvtap_skb_from_vnet_hdr(skb, &vnet_hdr);
481 if (err)
482 goto err_kfree;
483 }
484
485 rcu_read_lock_bh();
486 vlan = rcu_dereference(q->vlan);
487 if (vlan)
488 macvlan_start_xmit(skb, vlan->dev);
489 else
490 kfree_skb(skb);
491 rcu_read_unlock_bh();
492
493 return count;
494
495err_kfree:
496 kfree_skb(skb);
497
498err:
499 rcu_read_lock_bh();
500 vlan = rcu_dereference(q->vlan);
501 if (vlan)
502 netdev_get_tx_queue(vlan->dev, 0)->tx_dropped++;
503 rcu_read_unlock_bh();
504
505 return err;
506}
507
508static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
509 unsigned long count, loff_t pos)
510{
511 struct file *file = iocb->ki_filp;
512 ssize_t result = -ENOLINK;
513 struct macvtap_queue *q = file->private_data;
514
515 result = macvtap_get_user(q, iv, iov_length(iv, count),
516 file->f_flags & O_NONBLOCK);
517 return result;
518}
519
520/* Put packet to the user space buffer */
521static ssize_t macvtap_put_user(struct macvtap_queue *q,
522 const struct sk_buff *skb,
523 const struct iovec *iv, int len)
524{
525 struct macvlan_dev *vlan;
526 int ret;
527 int vnet_hdr_len = 0;
528
529 if (q->flags & IFF_VNET_HDR) {
530 struct virtio_net_hdr vnet_hdr;
531 vnet_hdr_len = sizeof (vnet_hdr);
532 if ((len -= vnet_hdr_len) < 0)
533 return -EINVAL;
534
535 ret = macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
536 if (ret)
537 return ret;
538
539 if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, vnet_hdr_len))
540 return -EFAULT;
541 }
542
543 len = min_t(int, skb->len, len);
544
545 ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
546
547 rcu_read_lock_bh();
548 vlan = rcu_dereference(q->vlan);
549 if (vlan)
550 macvlan_count_rx(vlan, len, ret == 0, 0);
551 rcu_read_unlock_bh();
552
553 return ret ? ret : (len + vnet_hdr_len);
554}
555
556static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
557 const struct iovec *iv, unsigned long len,
558 int noblock)
559{
560 DECLARE_WAITQUEUE(wait, current);
561 struct sk_buff *skb;
562 ssize_t ret = 0;
563
564 add_wait_queue(q->sk.sk_sleep, &wait);
565 while (len) {
566 current->state = TASK_INTERRUPTIBLE;
567
568 /* Read frames from the queue */
569 skb = skb_dequeue(&q->sk.sk_receive_queue);
570 if (!skb) {
571 if (noblock) {
572 ret = -EAGAIN;
573 break;
574 }
575 if (signal_pending(current)) {
576 ret = -ERESTARTSYS;
577 break;
578 }
579 /* Nothing to read, let's sleep */
580 schedule();
581 continue;
582 }
583 ret = macvtap_put_user(q, skb, iv, len);
584 kfree_skb(skb);
585 break;
586 }
587
588 current->state = TASK_RUNNING;
589 remove_wait_queue(q->sk.sk_sleep, &wait);
590 return ret;
591}
592
593static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
594 unsigned long count, loff_t pos)
595{
596 struct file *file = iocb->ki_filp;
597 struct macvtap_queue *q = file->private_data;
598 ssize_t len, ret = 0;
599
600 len = iov_length(iv, count);
601 if (len < 0) {
602 ret = -EINVAL;
603 goto out;
604 }
605
606 ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
607 ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */
608out:
609 return ret;
610}
611
612/*
613 * provide compatibility with generic tun/tap interface
614 */
615static long macvtap_ioctl(struct file *file, unsigned int cmd,
616 unsigned long arg)
617{
618 struct macvtap_queue *q = file->private_data;
619 struct macvlan_dev *vlan;
620 void __user *argp = (void __user *)arg;
621 struct ifreq __user *ifr = argp;
622 unsigned int __user *up = argp;
623 unsigned int u;
624 int ret;
625
626 switch (cmd) {
627 case TUNSETIFF:
628 /* ignore the name, just look at flags */
629 if (get_user(u, &ifr->ifr_flags))
630 return -EFAULT;
631
632 ret = 0;
633 if ((u & ~IFF_VNET_HDR) != (IFF_NO_PI | IFF_TAP))
634 ret = -EINVAL;
635 else
636 q->flags = u;
637
638 return ret;
639
640 case TUNGETIFF:
641 rcu_read_lock_bh();
642 vlan = rcu_dereference(q->vlan);
643 if (vlan)
644 dev_hold(vlan->dev);
645 rcu_read_unlock_bh();
646
647 if (!vlan)
648 return -ENOLINK;
649
650 ret = 0;
651 if (copy_to_user(&ifr->ifr_name, q->vlan->dev->name, IFNAMSIZ) ||
652 put_user(q->flags, &ifr->ifr_flags))
653 ret = -EFAULT;
654 dev_put(vlan->dev);
655 return ret;
656
657 case TUNGETFEATURES:
658 if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR, up))
659 return -EFAULT;
660 return 0;
661
662 case TUNSETSNDBUF:
663 if (get_user(u, up))
664 return -EFAULT;
665
666 q->sk.sk_sndbuf = u;
667 return 0;
668
669 case TUNSETOFFLOAD:
670 /* let the user check for future flags */
671 if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
672 TUN_F_TSO_ECN | TUN_F_UFO))
673 return -EINVAL;
674
675 /* TODO: only accept frames with the features that
676 got enabled for forwarded frames */
677 if (!(q->flags & IFF_VNET_HDR))
678 return -EINVAL;
679 return 0;
680
681 default:
682 return -EINVAL;
683 }
684}
685
#ifdef CONFIG_COMPAT
/* 32-bit ioctl entry point: translate the compat pointer argument and
 * forward to the native handler above. */
static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
693
/* File operations for the /dev/tapN character device.  read/write are
 * provided only through the aio entry points; there is no mmap. */
static const struct file_operations macvtap_fops = {
	.owner		= THIS_MODULE,
	.open		= macvtap_open,
	.release	= macvtap_release,
	.aio_read	= macvtap_aio_read,
	.aio_write	= macvtap_aio_write,
	.poll		= macvtap_poll,
	.llseek		= no_llseek,	/* the device is not seekable */
	.unlocked_ioctl	= macvtap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= macvtap_compat_ioctl,
#endif
};
707
708static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
709 struct msghdr *m, size_t total_len)
710{
711 struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
712 return macvtap_get_user(q, m->msg_iov, total_len,
713 m->msg_flags & MSG_DONTWAIT);
714}
715
716static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
717 struct msghdr *m, size_t total_len,
718 int flags)
719{
720 struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
721 int ret;
722 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
723 return -EINVAL;
724 ret = macvtap_do_read(q, iocb, m->msg_iov, total_len,
725 flags & MSG_DONTWAIT);
726 if (ret > total_len) {
727 m->msg_flags |= MSG_TRUNC;
728 ret = flags & MSG_TRUNC ? ret : total_len;
729 }
730 return ret;
731}
732
/* Ops structure to mimic raw sockets with tun.  Only sendmsg/recvmsg are
 * populated; the rest of the proto_ops slots stay NULL because callers
 * (vhost-net) use nothing else. */
static const struct proto_ops macvtap_socket_ops = {
	.sendmsg = macvtap_sendmsg,
	.recvmsg = macvtap_recvmsg,
};
738
739/* Get an underlying socket object from tun file. Returns error unless file is
740 * attached to a device. The returned object works like a packet socket, it
741 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
742 * holding a reference to the file for as long as the socket is in use. */
743struct socket *macvtap_get_socket(struct file *file)
744{
745 struct macvtap_queue *q;
746 if (file->f_op != &macvtap_fops)
747 return ERR_PTR(-EINVAL);
748 q = file->private_data;
749 if (!q)
750 return ERR_PTR(-EBADFD);
751 return &q->sock;
752}
753EXPORT_SYMBOL_GPL(macvtap_get_socket);
754
755static int macvtap_init(void)
756{
757 int err;
758
759 err = alloc_chrdev_region(&macvtap_major, 0,
760 MACVTAP_NUM_DEVS, "macvtap");
761 if (err)
762 goto out1;
763
764 cdev_init(&macvtap_cdev, &macvtap_fops);
765 err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
766 if (err)
767 goto out2;
768
769 macvtap_class = class_create(THIS_MODULE, "macvtap");
770 if (IS_ERR(macvtap_class)) {
771 err = PTR_ERR(macvtap_class);
772 goto out3;
773 }
774
775 err = macvlan_link_register(&macvtap_link_ops);
776 if (err)
777 goto out4;
778
779 return 0;
780
781out4:
782 class_unregister(macvtap_class);
783out3:
784 cdev_del(&macvtap_cdev);
785out2:
786 unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
787out1:
788 return err;
789}
790module_init(macvtap_init);
791
/* Module unload: tear everything down in the reverse order of
 * macvtap_init().  rtnl_link_unregister() is the counterpart of
 * macvlan_link_register(), which is a thin wrapper around
 * rtnl_link_register(). */
static void macvtap_exit(void)
{
	rtnl_link_unregister(&macvtap_link_ops);
	class_unregister(macvtap_class);
	cdev_del(&macvtap_cdev);
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
}
module_exit(macvtap_exit);
800
/* Module metadata; the rtnl alias lets "ip link add type macvtap"
 * autoload this module. */
MODULE_ALIAS_RTNL_LINK("macvtap");
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 2af81735386b..9f72cb45f4af 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -51,14 +51,11 @@
51 51
52static const char *meth_str="SGI O2 Fast Ethernet"; 52static const char *meth_str="SGI O2 Fast Ethernet";
53 53
54#define HAVE_TX_TIMEOUT
55/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */ 54/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */
56#define TX_TIMEOUT (400*HZ/1000) 55#define TX_TIMEOUT (400*HZ/1000)
57 56
58#ifdef HAVE_TX_TIMEOUT
59static int timeout = TX_TIMEOUT; 57static int timeout = TX_TIMEOUT;
60module_param(timeout, int, 0); 58module_param(timeout, int, 0);
61#endif
62 59
63/* 60/*
64 * This structure is private to each device. It is used to pass 61 * This structure is private to each device. It is used to pass
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 829b9ec9ff67..64394647dddc 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -508,11 +508,11 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
508 /* We are copying all relevant data to the skb - temporarily 508 /* We are copying all relevant data to the skb - temporarily
509 * synch buffers for the copy */ 509 * synch buffers for the copy */
510 dma = be64_to_cpu(rx_desc->data[0].addr); 510 dma = be64_to_cpu(rx_desc->data[0].addr);
511 dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0, 511 dma_sync_single_for_cpu(&mdev->pdev->dev, dma, length,
512 length, DMA_FROM_DEVICE); 512 DMA_FROM_DEVICE);
513 skb_copy_to_linear_data(skb, va, length); 513 skb_copy_to_linear_data(skb, va, length);
514 dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0, 514 dma_sync_single_for_device(&mdev->pdev->dev, dma, length,
515 length, DMA_FROM_DEVICE); 515 DMA_FROM_DEVICE);
516 skb->tail += length; 516 skb->tail += length;
517 } else { 517 } else {
518 518
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 3cf56d90d859..8f6e816a7395 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1271,7 +1271,7 @@ int mlx4_restart_one(struct pci_dev *pdev)
1271 return __mlx4_init_one(pdev, NULL); 1271 return __mlx4_init_one(pdev, NULL);
1272} 1272}
1273 1273
1274static struct pci_device_id mlx4_pci_table[] = { 1274static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
1275 { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */ 1275 { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
1276 { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */ 1276 { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
1277 { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */ 1277 { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index af67af55efe7..c97b6e4365a9 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -55,7 +55,6 @@
55#include <linux/types.h> 55#include <linux/types.h>
56#include <linux/inet_lro.h> 56#include <linux/inet_lro.h>
57#include <asm/system.h> 57#include <asm/system.h>
58#include <linux/list.h>
59 58
60static char mv643xx_eth_driver_name[] = "mv643xx_eth"; 59static char mv643xx_eth_driver_name[] = "mv643xx_eth";
61static char mv643xx_eth_driver_version[] = "1.4"; 60static char mv643xx_eth_driver_version[] = "1.4";
@@ -1697,7 +1696,7 @@ static u32 uc_addr_filter_mask(struct net_device *dev)
1697 return 0; 1696 return 0;
1698 1697
1699 nibbles = 1 << (dev->dev_addr[5] & 0x0f); 1698 nibbles = 1 << (dev->dev_addr[5] & 0x0f);
1700 list_for_each_entry(ha, &dev->uc.list, list) { 1699 netdev_for_each_uc_addr(ha, dev) {
1701 if (memcmp(dev->dev_addr, ha->addr, 5)) 1700 if (memcmp(dev->dev_addr, ha->addr, 5))
1702 return 0; 1701 return 0;
1703 if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0) 1702 if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
@@ -1795,7 +1794,7 @@ oom:
1795 memset(mc_spec, 0, 0x100); 1794 memset(mc_spec, 0, 0x100);
1796 memset(mc_other, 0, 0x100); 1795 memset(mc_other, 0, 0x100);
1797 1796
1798 for (addr = dev->mc_list; addr != NULL; addr = addr->next) { 1797 netdev_for_each_mc_addr(addr, dev) {
1799 u8 *a = addr->da_addr; 1798 u8 *a = addr->da_addr;
1800 u32 *table; 1799 u32 *table;
1801 int entry; 1800 int entry;
@@ -2847,6 +2846,7 @@ static const struct net_device_ops mv643xx_eth_netdev_ops = {
2847 .ndo_start_xmit = mv643xx_eth_xmit, 2846 .ndo_start_xmit = mv643xx_eth_xmit,
2848 .ndo_set_rx_mode = mv643xx_eth_set_rx_mode, 2847 .ndo_set_rx_mode = mv643xx_eth_set_rx_mode,
2849 .ndo_set_mac_address = mv643xx_eth_set_mac_address, 2848 .ndo_set_mac_address = mv643xx_eth_set_mac_address,
2849 .ndo_validate_addr = eth_validate_addr,
2850 .ndo_do_ioctl = mv643xx_eth_ioctl, 2850 .ndo_do_ioctl = mv643xx_eth_ioctl,
2851 .ndo_change_mtu = mv643xx_eth_change_mtu, 2851 .ndo_change_mtu = mv643xx_eth_change_mtu,
2852 .ndo_tx_timeout = mv643xx_eth_tx_timeout, 2852 .ndo_tx_timeout = mv643xx_eth_tx_timeout,
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 3fcb1c356e0d..676c513e12fc 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -38,6 +38,8 @@
38 * Myricom, Inc., 325N Santa Anita Avenue, Arcadia, CA 91006 38 * Myricom, Inc., 325N Santa Anita Avenue, Arcadia, CA 91006
39 *************************************************************************/ 39 *************************************************************************/
40 40
41#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
42
41#include <linux/tcp.h> 43#include <linux/tcp.h>
42#include <linux/netdevice.h> 44#include <linux/netdevice.h>
43#include <linux/skbuff.h> 45#include <linux/skbuff.h>
@@ -75,7 +77,7 @@
75#include "myri10ge_mcp.h" 77#include "myri10ge_mcp.h"
76#include "myri10ge_mcp_gen_header.h" 78#include "myri10ge_mcp_gen_header.h"
77 79
78#define MYRI10GE_VERSION_STR "1.5.1-1.453" 80#define MYRI10GE_VERSION_STR "1.5.2-1.459"
79 81
80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); 82MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
81MODULE_AUTHOR("Maintainer: help@myri.com"); 83MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -819,9 +821,7 @@ static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
819 status = myri10ge_send_cmd(mgp, ctl, &cmd, 0); 821 status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);
820 822
821 if (status) { 823 if (status) {
822 printk(KERN_ERR 824 netdev_err(mgp->dev, "Failed to set flow control mode\n");
823 "myri10ge: %s: Failed to set flow control mode\n",
824 mgp->dev->name);
825 return status; 825 return status;
826 } 826 }
827 mgp->pause = pause; 827 mgp->pause = pause;
@@ -837,8 +837,7 @@ myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
837 ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC; 837 ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
838 status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic); 838 status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic);
839 if (status) 839 if (status)
840 printk(KERN_ERR "myri10ge: %s: Failed to set promisc mode\n", 840 netdev_err(mgp->dev, "Failed to set promisc mode\n");
841 mgp->dev->name);
842} 841}
843 842
844static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type) 843static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
@@ -1201,6 +1200,9 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
1201{ 1200{
1202 struct page *page; 1201 struct page *page;
1203 int idx; 1202 int idx;
1203#if MYRI10GE_ALLOC_SIZE > 4096
1204 int end_offset;
1205#endif
1204 1206
1205 if (unlikely(rx->watchdog_needed && !watchdog)) 1207 if (unlikely(rx->watchdog_needed && !watchdog))
1206 return; 1208 return;
@@ -1242,9 +1244,9 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
1242 1244
1243#if MYRI10GE_ALLOC_SIZE > 4096 1245#if MYRI10GE_ALLOC_SIZE > 4096
1244 /* don't cross a 4KB boundary */ 1246 /* don't cross a 4KB boundary */
1245 if ((rx->page_offset >> 12) != 1247 end_offset = rx->page_offset + bytes - 1;
1246 ((rx->page_offset + bytes - 1) >> 12)) 1248 if ((unsigned)(rx->page_offset ^ end_offset) > 4095)
1247 rx->page_offset = (rx->page_offset + 4096) & ~4095; 1249 rx->page_offset = end_offset & ~4095;
1248#endif 1250#endif
1249 rx->fill_cnt++; 1251 rx->fill_cnt++;
1250 1252
@@ -1482,19 +1484,15 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
1482 1484
1483 if (mgp->link_state == MXGEFW_LINK_UP) { 1485 if (mgp->link_state == MXGEFW_LINK_UP) {
1484 if (netif_msg_link(mgp)) 1486 if (netif_msg_link(mgp))
1485 printk(KERN_INFO 1487 netdev_info(mgp->dev, "link up\n");
1486 "myri10ge: %s: link up\n",
1487 mgp->dev->name);
1488 netif_carrier_on(mgp->dev); 1488 netif_carrier_on(mgp->dev);
1489 mgp->link_changes++; 1489 mgp->link_changes++;
1490 } else { 1490 } else {
1491 if (netif_msg_link(mgp)) 1491 if (netif_msg_link(mgp))
1492 printk(KERN_INFO 1492 netdev_info(mgp->dev, "link %s\n",
1493 "myri10ge: %s: link %s\n", 1493 link_up == MXGEFW_LINK_MYRINET ?
1494 mgp->dev->name, 1494 "mismatch (Myrinet detected)" :
1495 (link_up == MXGEFW_LINK_MYRINET ? 1495 "down");
1496 "mismatch (Myrinet detected)" :
1497 "down"));
1498 netif_carrier_off(mgp->dev); 1496 netif_carrier_off(mgp->dev);
1499 mgp->link_changes++; 1497 mgp->link_changes++;
1500 } 1498 }
@@ -1503,9 +1501,8 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
1503 ntohl(stats->rdma_tags_available)) { 1501 ntohl(stats->rdma_tags_available)) {
1504 mgp->rdma_tags_available = 1502 mgp->rdma_tags_available =
1505 ntohl(stats->rdma_tags_available); 1503 ntohl(stats->rdma_tags_available);
1506 printk(KERN_WARNING "myri10ge: %s: RDMA timed out! " 1504 netdev_warn(mgp->dev, "RDMA timed out! %d tags left\n",
1507 "%d tags left\n", mgp->dev->name, 1505 mgp->rdma_tags_available);
1508 mgp->rdma_tags_available);
1509 } 1506 }
1510 mgp->down_cnt += stats->link_down; 1507 mgp->down_cnt += stats->link_down;
1511 if (stats->link_down) 1508 if (stats->link_down)
@@ -1576,8 +1573,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
1576 if (send_done_count != tx->pkt_done) 1573 if (send_done_count != tx->pkt_done)
1577 myri10ge_tx_done(ss, (int)send_done_count); 1574 myri10ge_tx_done(ss, (int)send_done_count);
1578 if (unlikely(i > myri10ge_max_irq_loops)) { 1575 if (unlikely(i > myri10ge_max_irq_loops)) {
1579 printk(KERN_WARNING "myri10ge: %s: irq stuck?\n", 1576 netdev_err(mgp->dev, "irq stuck?\n");
1580 mgp->dev->name);
1581 stats->valid = 0; 1577 stats->valid = 0;
1582 schedule_work(&mgp->watchdog_work); 1578 schedule_work(&mgp->watchdog_work);
1583 } 1579 }
@@ -1614,16 +1610,14 @@ myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1614 */ 1610 */
1615 ptr = mgp->product_code_string; 1611 ptr = mgp->product_code_string;
1616 if (ptr == NULL) { 1612 if (ptr == NULL) {
1617 printk(KERN_ERR "myri10ge: %s: Missing product code\n", 1613 netdev_err(netdev, "Missing product code\n");
1618 netdev->name);
1619 return 0; 1614 return 0;
1620 } 1615 }
1621 for (i = 0; i < 3; i++, ptr++) { 1616 for (i = 0; i < 3; i++, ptr++) {
1622 ptr = strchr(ptr, '-'); 1617 ptr = strchr(ptr, '-');
1623 if (ptr == NULL) { 1618 if (ptr == NULL) {
1624 printk(KERN_ERR "myri10ge: %s: Invalid product " 1619 netdev_err(netdev, "Invalid product code %s\n",
1625 "code %s\n", netdev->name, 1620 mgp->product_code_string);
1626 mgp->product_code_string);
1627 return 0; 1621 return 0;
1628 } 1622 }
1629 } 1623 }
@@ -2009,17 +2003,15 @@ static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
2009 mgp->small_bytes + MXGEFW_PAD, 0); 2003 mgp->small_bytes + MXGEFW_PAD, 0);
2010 2004
2011 if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) { 2005 if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
2012 printk(KERN_ERR 2006 netdev_err(dev, "slice-%d: alloced only %d small bufs\n",
2013 "myri10ge: %s:slice-%d: alloced only %d small bufs\n", 2007 slice, ss->rx_small.fill_cnt);
2014 dev->name, slice, ss->rx_small.fill_cnt);
2015 goto abort_with_rx_small_ring; 2008 goto abort_with_rx_small_ring;
2016 } 2009 }
2017 2010
2018 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); 2011 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
2019 if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) { 2012 if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
2020 printk(KERN_ERR 2013 netdev_err(dev, "slice-%d: alloced only %d big bufs\n",
2021 "myri10ge: %s:slice-%d: alloced only %d big bufs\n", 2014 slice, ss->rx_big.fill_cnt);
2022 dev->name, slice, ss->rx_big.fill_cnt);
2023 goto abort_with_rx_big_ring; 2015 goto abort_with_rx_big_ring;
2024 } 2016 }
2025 2017
@@ -2358,7 +2350,7 @@ static int myri10ge_open(struct net_device *dev)
2358 mgp->running = MYRI10GE_ETH_STARTING; 2350 mgp->running = MYRI10GE_ETH_STARTING;
2359 status = myri10ge_reset(mgp); 2351 status = myri10ge_reset(mgp);
2360 if (status != 0) { 2352 if (status != 0) {
2361 printk(KERN_ERR "myri10ge: %s: failed reset\n", dev->name); 2353 netdev_err(dev, "failed reset\n");
2362 goto abort_with_nothing; 2354 goto abort_with_nothing;
2363 } 2355 }
2364 2356
@@ -2370,9 +2362,7 @@ static int myri10ge_open(struct net_device *dev)
2370 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES, 2362 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
2371 &cmd, 0); 2363 &cmd, 0);
2372 if (status != 0) { 2364 if (status != 0) {
2373 printk(KERN_ERR 2365 netdev_err(dev, "failed to set number of slices\n");
2374 "myri10ge: %s: failed to set number of slices\n",
2375 dev->name);
2376 goto abort_with_nothing; 2366 goto abort_with_nothing;
2377 } 2367 }
2378 /* setup the indirection table */ 2368 /* setup the indirection table */
@@ -2384,9 +2374,7 @@ static int myri10ge_open(struct net_device *dev)
2384 MXGEFW_CMD_GET_RSS_TABLE_OFFSET, 2374 MXGEFW_CMD_GET_RSS_TABLE_OFFSET,
2385 &cmd, 0); 2375 &cmd, 0);
2386 if (status != 0) { 2376 if (status != 0) {
2387 printk(KERN_ERR 2377 netdev_err(dev, "failed to setup rss tables\n");
2388 "myri10ge: %s: failed to setup rss tables\n",
2389 dev->name);
2390 goto abort_with_nothing; 2378 goto abort_with_nothing;
2391 } 2379 }
2392 2380
@@ -2400,9 +2388,7 @@ static int myri10ge_open(struct net_device *dev)
2400 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE, 2388 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE,
2401 &cmd, 0); 2389 &cmd, 0);
2402 if (status != 0) { 2390 if (status != 0) {
2403 printk(KERN_ERR 2391 netdev_err(dev, "failed to enable slices\n");
2404 "myri10ge: %s: failed to enable slices\n",
2405 dev->name);
2406 goto abort_with_nothing; 2392 goto abort_with_nothing;
2407 } 2393 }
2408 } 2394 }
@@ -2450,9 +2436,7 @@ static int myri10ge_open(struct net_device *dev)
2450 2436
2451 status = myri10ge_get_txrx(mgp, slice); 2437 status = myri10ge_get_txrx(mgp, slice);
2452 if (status != 0) { 2438 if (status != 0) {
2453 printk(KERN_ERR 2439 netdev_err(dev, "failed to get ring sizes or locations\n");
2454 "myri10ge: %s: failed to get ring sizes or locations\n",
2455 dev->name);
2456 goto abort_with_rings; 2440 goto abort_with_rings;
2457 } 2441 }
2458 status = myri10ge_allocate_rings(ss); 2442 status = myri10ge_allocate_rings(ss);
@@ -2465,9 +2449,7 @@ static int myri10ge_open(struct net_device *dev)
2465 if (slice == 0 || mgp->dev->real_num_tx_queues > 1) 2449 if (slice == 0 || mgp->dev->real_num_tx_queues > 1)
2466 status = myri10ge_set_stats(mgp, slice); 2450 status = myri10ge_set_stats(mgp, slice);
2467 if (status) { 2451 if (status) {
2468 printk(KERN_ERR 2452 netdev_err(dev, "Couldn't set stats DMA\n");
2469 "myri10ge: %s: Couldn't set stats DMA\n",
2470 dev->name);
2471 goto abort_with_rings; 2453 goto abort_with_rings;
2472 } 2454 }
2473 2455
@@ -2498,8 +2480,7 @@ static int myri10ge_open(struct net_device *dev)
2498 status |= 2480 status |=
2499 myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0); 2481 myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0);
2500 if (status) { 2482 if (status) {
2501 printk(KERN_ERR "myri10ge: %s: Couldn't set buffer sizes\n", 2483 netdev_err(dev, "Couldn't set buffer sizes\n");
2502 dev->name);
2503 goto abort_with_rings; 2484 goto abort_with_rings;
2504 } 2485 }
2505 2486
@@ -2511,8 +2492,7 @@ static int myri10ge_open(struct net_device *dev)
2511 cmd.data0 = 0; 2492 cmd.data0 = 0;
2512 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0); 2493 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0);
2513 if (status && status != -ENOSYS) { 2494 if (status && status != -ENOSYS) {
2514 printk(KERN_ERR "myri10ge: %s: Couldn't set TSO mode\n", 2495 netdev_err(dev, "Couldn't set TSO mode\n");
2515 dev->name);
2516 goto abort_with_rings; 2496 goto abort_with_rings;
2517 } 2497 }
2518 2498
@@ -2521,8 +2501,7 @@ static int myri10ge_open(struct net_device *dev)
2521 2501
2522 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0); 2502 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
2523 if (status) { 2503 if (status) {
2524 printk(KERN_ERR "myri10ge: %s: Couldn't bring up link\n", 2504 netdev_err(dev, "Couldn't bring up link\n");
2525 dev->name);
2526 goto abort_with_rings; 2505 goto abort_with_rings;
2527 } 2506 }
2528 2507
@@ -2575,15 +2554,12 @@ static int myri10ge_close(struct net_device *dev)
2575 status = 2554 status =
2576 myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0); 2555 myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0);
2577 if (status) 2556 if (status)
2578 printk(KERN_ERR 2557 netdev_err(dev, "Couldn't bring down link\n");
2579 "myri10ge: %s: Couldn't bring down link\n",
2580 dev->name);
2581 2558
2582 wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt, 2559 wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt,
2583 HZ); 2560 HZ);
2584 if (old_down_cnt == mgp->down_cnt) 2561 if (old_down_cnt == mgp->down_cnt)
2585 printk(KERN_ERR "myri10ge: %s never got down irq\n", 2562 netdev_err(dev, "never got down irq\n");
2586 dev->name);
2587 } 2563 }
2588 netif_tx_disable(dev); 2564 netif_tx_disable(dev);
2589 myri10ge_free_irq(mgp); 2565 myri10ge_free_irq(mgp);
@@ -2944,9 +2920,7 @@ abort_linearize:
2944 idx = (idx + 1) & tx->mask; 2920 idx = (idx + 1) & tx->mask;
2945 } while (idx != last_idx); 2921 } while (idx != last_idx);
2946 if (skb_is_gso(skb)) { 2922 if (skb_is_gso(skb)) {
2947 printk(KERN_ERR 2923 netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n");
2948 "myri10ge: %s: TSO but wanted to linearize?!?!?\n",
2949 mgp->dev->name);
2950 goto drop; 2924 goto drop;
2951 } 2925 }
2952 2926
@@ -3043,8 +3017,8 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
3043 3017
3044 err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1); 3018 err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
3045 if (err != 0) { 3019 if (err != 0) {
3046 printk(KERN_ERR "myri10ge: %s: Failed MXGEFW_ENABLE_ALLMULTI," 3020 netdev_err(dev, "Failed MXGEFW_ENABLE_ALLMULTI, error status: %d\n",
3047 " error status: %d\n", dev->name, err); 3021 err);
3048 goto abort; 3022 goto abort;
3049 } 3023 }
3050 3024
@@ -3058,14 +3032,13 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
3058 err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, 3032 err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
3059 &cmd, 1); 3033 &cmd, 1);
3060 if (err != 0) { 3034 if (err != 0) {
3061 printk(KERN_ERR 3035 netdev_err(dev, "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, error status: %d\n",
3062 "myri10ge: %s: Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS" 3036 err);
3063 ", error status: %d\n", dev->name, err);
3064 goto abort; 3037 goto abort;
3065 } 3038 }
3066 3039
3067 /* Walk the multicast list, and add each address */ 3040 /* Walk the multicast list, and add each address */
3068 for (mc_list = dev->mc_list; mc_list != NULL; mc_list = mc_list->next) { 3041 netdev_for_each_mc_addr(mc_list, dev) {
3069 memcpy(data, &mc_list->dmi_addr, 6); 3042 memcpy(data, &mc_list->dmi_addr, 6);
3070 cmd.data0 = ntohl(data[0]); 3043 cmd.data0 = ntohl(data[0]);
3071 cmd.data1 = ntohl(data[1]); 3044 cmd.data1 = ntohl(data[1]);
@@ -3073,18 +3046,16 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
3073 &cmd, 1); 3046 &cmd, 1);
3074 3047
3075 if (err != 0) { 3048 if (err != 0) {
3076 printk(KERN_ERR "myri10ge: %s: Failed " 3049 netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n",
3077 "MXGEFW_JOIN_MULTICAST_GROUP, error status:" 3050 err, mc_list->dmi_addr);
3078 "%d\t", dev->name, err);
3079 printk(KERN_ERR "MAC %pM\n", mc_list->dmi_addr);
3080 goto abort; 3051 goto abort;
3081 } 3052 }
3082 } 3053 }
3083 /* Enable multicast filtering */ 3054 /* Enable multicast filtering */
3084 err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1); 3055 err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1);
3085 if (err != 0) { 3056 if (err != 0) {
3086 printk(KERN_ERR "myri10ge: %s: Failed MXGEFW_DISABLE_ALLMULTI," 3057 netdev_err(dev, "Failed MXGEFW_DISABLE_ALLMULTI, error status: %d\n",
3087 "error status: %d\n", dev->name, err); 3058 err);
3088 goto abort; 3059 goto abort;
3089 } 3060 }
3090 3061
@@ -3105,9 +3076,8 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
3105 3076
3106 status = myri10ge_update_mac_address(mgp, sa->sa_data); 3077 status = myri10ge_update_mac_address(mgp, sa->sa_data);
3107 if (status != 0) { 3078 if (status != 0) {
3108 printk(KERN_ERR 3079 netdev_err(dev, "changing mac address failed with %d\n",
3109 "myri10ge: %s: changing mac address failed with %d\n", 3080 status);
3110 dev->name, status);
3111 return status; 3081 return status;
3112 } 3082 }
3113 3083
@@ -3122,12 +3092,10 @@ static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
3122 int error = 0; 3092 int error = 0;
3123 3093
3124 if ((new_mtu < 68) || (ETH_HLEN + new_mtu > MYRI10GE_MAX_ETHER_MTU)) { 3094 if ((new_mtu < 68) || (ETH_HLEN + new_mtu > MYRI10GE_MAX_ETHER_MTU)) {
3125 printk(KERN_ERR "myri10ge: %s: new mtu (%d) is not valid\n", 3095 netdev_err(dev, "new mtu (%d) is not valid\n", new_mtu);
3126 dev->name, new_mtu);
3127 return -EINVAL; 3096 return -EINVAL;
3128 } 3097 }
3129 printk(KERN_INFO "%s: changing mtu from %d to %d\n", 3098 netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu);
3130 dev->name, dev->mtu, new_mtu);
3131 if (mgp->running) { 3099 if (mgp->running) {
3132 /* if we change the mtu on an active device, we must 3100 /* if we change the mtu on an active device, we must
3133 * reset the device so the firmware sees the change */ 3101 * reset the device so the firmware sees the change */
@@ -3356,7 +3324,7 @@ static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
3356 3324
3357 netif_device_detach(netdev); 3325 netif_device_detach(netdev);
3358 if (netif_running(netdev)) { 3326 if (netif_running(netdev)) {
3359 printk(KERN_INFO "myri10ge: closing %s\n", netdev->name); 3327 netdev_info(netdev, "closing\n");
3360 rtnl_lock(); 3328 rtnl_lock();
3361 myri10ge_close(netdev); 3329 myri10ge_close(netdev);
3362 rtnl_unlock(); 3330 rtnl_unlock();
@@ -3383,8 +3351,7 @@ static int myri10ge_resume(struct pci_dev *pdev)
3383 msleep(5); /* give card time to respond */ 3351 msleep(5); /* give card time to respond */
3384 pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor); 3352 pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
3385 if (vendor == 0xffff) { 3353 if (vendor == 0xffff) {
3386 printk(KERN_ERR "myri10ge: %s: device disappeared!\n", 3354 netdev_err(mgp->dev, "device disappeared!\n");
3387 mgp->dev->name);
3388 return -EIO; 3355 return -EIO;
3389 } 3356 }
3390 3357
@@ -3463,10 +3430,9 @@ static void myri10ge_watchdog(struct work_struct *work)
3463 * if the card rebooted due to a parity error 3430 * if the card rebooted due to a parity error
3464 * For now, just report it */ 3431 * For now, just report it */
3465 reboot = myri10ge_read_reboot(mgp); 3432 reboot = myri10ge_read_reboot(mgp);
3466 printk(KERN_ERR 3433 netdev_err(mgp->dev, "NIC rebooted (0x%x),%s resetting\n",
3467 "myri10ge: %s: NIC rebooted (0x%x),%s resetting\n", 3434 reboot,
3468 mgp->dev->name, reboot, 3435 myri10ge_reset_recover ? "" : " not");
3469 myri10ge_reset_recover ? " " : " not");
3470 if (myri10ge_reset_recover == 0) 3436 if (myri10ge_reset_recover == 0)
3471 return; 3437 return;
3472 rtnl_lock(); 3438 rtnl_lock();
@@ -3494,31 +3460,26 @@ static void myri10ge_watchdog(struct work_struct *work)
3494 if (cmd == 0xffff) { 3460 if (cmd == 0xffff) {
3495 pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor); 3461 pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
3496 if (vendor == 0xffff) { 3462 if (vendor == 0xffff) {
3497 printk(KERN_ERR 3463 netdev_err(mgp->dev, "device disappeared!\n");
3498 "myri10ge: %s: device disappeared!\n",
3499 mgp->dev->name);
3500 return; 3464 return;
3501 } 3465 }
3502 } 3466 }
3503 /* Perhaps it is a software error. Try to reset */ 3467 /* Perhaps it is a software error. Try to reset */
3504 3468
3505 printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n", 3469 netdev_err(mgp->dev, "device timeout, resetting\n");
3506 mgp->dev->name);
3507 for (i = 0; i < mgp->num_slices; i++) { 3470 for (i = 0; i < mgp->num_slices; i++) {
3508 tx = &mgp->ss[i].tx; 3471 tx = &mgp->ss[i].tx;
3509 printk(KERN_INFO 3472 netdev_err(mgp->dev, "(%d): %d %d %d %d %d %d\n",
3510 "myri10ge: %s: (%d): %d %d %d %d %d %d\n", 3473 i, tx->queue_active, tx->req,
3511 mgp->dev->name, i, tx->queue_active, tx->req, 3474 tx->done, tx->pkt_start, tx->pkt_done,
3512 tx->done, tx->pkt_start, tx->pkt_done, 3475 (int)ntohl(mgp->ss[i].fw_stats->
3513 (int)ntohl(mgp->ss[i].fw_stats-> 3476 send_done_count));
3514 send_done_count));
3515 msleep(2000); 3477 msleep(2000);
3516 printk(KERN_INFO 3478 netdev_info(mgp->dev, "(%d): %d %d %d %d %d %d\n",
3517 "myri10ge: %s: (%d): %d %d %d %d %d %d\n", 3479 i, tx->queue_active, tx->req,
3518 mgp->dev->name, i, tx->queue_active, tx->req, 3480 tx->done, tx->pkt_start, tx->pkt_done,
3519 tx->done, tx->pkt_start, tx->pkt_done, 3481 (int)ntohl(mgp->ss[i].fw_stats->
3520 (int)ntohl(mgp->ss[i].fw_stats-> 3482 send_done_count));
3521 send_done_count));
3522 } 3483 }
3523 } 3484 }
3524 3485
@@ -3528,8 +3489,7 @@ static void myri10ge_watchdog(struct work_struct *work)
3528 } 3489 }
3529 status = myri10ge_load_firmware(mgp, 1); 3490 status = myri10ge_load_firmware(mgp, 1);
3530 if (status != 0) 3491 if (status != 0)
3531 printk(KERN_ERR "myri10ge: %s: failed to load firmware\n", 3492 netdev_err(mgp->dev, "failed to load firmware\n");
3532 mgp->dev->name);
3533 else 3493 else
3534 myri10ge_open(mgp->dev); 3494 myri10ge_open(mgp->dev);
3535 rtnl_unlock(); 3495 rtnl_unlock();
@@ -3580,14 +3540,10 @@ static void myri10ge_watchdog_timer(unsigned long arg)
3580 /* nic seems like it might be stuck.. */ 3540 /* nic seems like it might be stuck.. */
3581 if (rx_pause_cnt != mgp->watchdog_pause) { 3541 if (rx_pause_cnt != mgp->watchdog_pause) {
3582 if (net_ratelimit()) 3542 if (net_ratelimit())
3583 printk(KERN_WARNING 3543 netdev_err(mgp->dev, "slice %d: TX paused, check link partner\n",
3584 "myri10ge %s slice %d:" 3544 i);
3585 "TX paused, check link partner\n",
3586 mgp->dev->name, i);
3587 } else { 3545 } else {
3588 printk(KERN_WARNING 3546 netdev_warn(mgp->dev, "slice %d stuck:", i);
3589 "myri10ge %s slice %d stuck:",
3590 mgp->dev->name, i);
3591 reset_needed = 1; 3547 reset_needed = 1;
3592 } 3548 }
3593 } 3549 }
@@ -4085,7 +4041,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
4085#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008 4041#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
4086#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009 4042#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009
4087 4043
4088static struct pci_device_id myri10ge_pci_tbl[] = { 4044static DEFINE_PCI_DEVICE_TABLE(myri10ge_pci_tbl) = {
4089 {PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)}, 4045 {PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
4090 {PCI_DEVICE 4046 {PCI_DEVICE
4091 (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)}, 4047 (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
@@ -4127,13 +4083,11 @@ static struct notifier_block myri10ge_dca_notifier = {
4127 4083
4128static __init int myri10ge_init_module(void) 4084static __init int myri10ge_init_module(void)
4129{ 4085{
4130 printk(KERN_INFO "%s: Version %s\n", myri10ge_driver.name, 4086 pr_info("Version %s\n", MYRI10GE_VERSION_STR);
4131 MYRI10GE_VERSION_STR);
4132 4087
4133 if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_MAX) { 4088 if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_MAX) {
4134 printk(KERN_ERR 4089 pr_err("Illegal rssh hash type %d, defaulting to source port\n",
4135 "%s: Illegal rssh hash type %d, defaulting to source port\n", 4090 myri10ge_rss_hash);
4136 myri10ge_driver.name, myri10ge_rss_hash);
4137 myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT; 4091 myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
4138 } 4092 }
4139#ifdef CONFIG_MYRI10GE_DCA 4093#ifdef CONFIG_MYRI10GE_DCA
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index b3513ad3b703..8b4313085359 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -716,10 +716,10 @@ static int myri_header(struct sk_buff *skb, struct net_device *dev,
716 pad[0] = MYRI_PAD_LEN; 716 pad[0] = MYRI_PAD_LEN;
717 pad[1] = 0xab; 717 pad[1] = 0xab;
718 718
719 /* Set the protocol type. For a packet of type ETH_P_802_3 we put the length 719 /* Set the protocol type. For a packet of type ETH_P_802_3/2 we put the
720 * in here instead. It is up to the 802.2 layer to carry protocol information. 720 * length in here instead.
721 */ 721 */
722 if (type != ETH_P_802_3) 722 if (type != ETH_P_802_3 && type != ETH_P_802_2)
723 eth->h_proto = htons(type); 723 eth->h_proto = htons(type);
724 else 724 else
725 eth->h_proto = htons(len); 725 eth->h_proto = htons(len);
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 797fe164ce27..e52038783245 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -247,7 +247,7 @@ static struct {
247 { "NatSemi DP8381[56]", 0, 24 }, 247 { "NatSemi DP8381[56]", 0, 24 },
248}; 248};
249 249
250static struct pci_device_id natsemi_pci_tbl[] __devinitdata = { 250static DEFINE_PCI_DEVICE_TABLE(natsemi_pci_tbl) = {
251 { PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 }, 251 { PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 },
252 { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, 252 { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
253 { } /* terminate list */ 253 { } /* terminate list */
@@ -2488,16 +2488,16 @@ static void __set_rx_mode(struct net_device *dev)
2488 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 2488 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2489 rx_mode = RxFilterEnable | AcceptBroadcast 2489 rx_mode = RxFilterEnable | AcceptBroadcast
2490 | AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys; 2490 | AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
2491 } else if ((dev->mc_count > multicast_filter_limit) || 2491 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2492 (dev->flags & IFF_ALLMULTI)) { 2492 (dev->flags & IFF_ALLMULTI)) {
2493 rx_mode = RxFilterEnable | AcceptBroadcast 2493 rx_mode = RxFilterEnable | AcceptBroadcast
2494 | AcceptAllMulticast | AcceptMyPhys; 2494 | AcceptAllMulticast | AcceptMyPhys;
2495 } else { 2495 } else {
2496 struct dev_mc_list *mclist; 2496 struct dev_mc_list *mclist;
2497 int i; 2497 int i;
2498
2498 memset(mc_filter, 0, sizeof(mc_filter)); 2499 memset(mc_filter, 0, sizeof(mc_filter));
2499 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 2500 netdev_for_each_mc_addr(mclist, dev) {
2500 i++, mclist = mclist->next) {
2501 int b = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff; 2501 int b = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff;
2502 mc_filter[b/8] |= (1 << (b & 0x07)); 2502 mc_filter[b/8] |= (1 << (b & 0x07));
2503 } 2503 }
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 3fcebb70151c..85aec4f10131 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -136,7 +136,7 @@ static struct {
136}; 136};
137 137
138 138
139static struct pci_device_id ne2k_pci_tbl[] = { 139static DEFINE_PCI_DEVICE_TABLE(ne2k_pci_tbl) = {
140 { 0x10ec, 0x8029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_RealTek_RTL_8029 }, 140 { 0x10ec, 0x8029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_RealTek_RTL_8029 },
141 { 0x1050, 0x0940, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940 }, 141 { 0x1050, 0x0940, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940 },
142 { 0x11f6, 0x1401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Compex_RL2000 }, 142 { 0x11f6, 0x1401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Compex_RL2000 },
diff --git a/drivers/net/netxen/Makefile b/drivers/net/netxen/Makefile
index 11d94e2434e4..861a0590b1f4 100644
--- a/drivers/net/netxen/Makefile
+++ b/drivers/net/netxen/Makefile
@@ -18,7 +18,7 @@
18# MA 02111-1307, USA. 18# MA 02111-1307, USA.
19# 19#
20# The full GNU General Public License is included in this distribution 20# The full GNU General Public License is included in this distribution
21# in the file called LICENSE. 21# in the file called "COPYING".
22# 22#
23# 23#
24 24
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 9bc5bd1d538a..144d2e880422 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
@@ -420,7 +420,7 @@ struct status_desc {
420} __attribute__ ((aligned(16))); 420} __attribute__ ((aligned(16)));
421 421
422/* UNIFIED ROMIMAGE *************************/ 422/* UNIFIED ROMIMAGE *************************/
423#define NX_UNI_FW_MIN_SIZE 0x3eb000 423#define NX_UNI_FW_MIN_SIZE 0xc8000
424#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0 424#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0
425#define NX_UNI_DIR_SECT_BOOTLD 0x6 425#define NX_UNI_DIR_SECT_BOOTLD 0x6
426#define NX_UNI_DIR_SECT_FW 0x7 426#define NX_UNI_DIR_SECT_FW 0x7
@@ -1427,8 +1427,8 @@ static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
1427 1427
1428} 1428}
1429 1429
1430int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac); 1430int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac);
1431int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac); 1431int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac);
1432extern void netxen_change_ringparam(struct netxen_adapter *adapter); 1432extern void netxen_change_ringparam(struct netxen_adapter *adapter);
1433extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, 1433extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
1434 int *valp); 1434 int *valp);
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 9cb8f6878047..2a8ef5fc9663 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 542f408333ff..f8499e56cbee 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index d138fc22927a..622e4c8be937 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
@@ -969,7 +969,8 @@ enum {
969#define NX_DEV_READY 3 969#define NX_DEV_READY 3
970#define NX_DEV_NEED_RESET 4 970#define NX_DEV_NEED_RESET 4
971#define NX_DEV_NEED_QUISCENT 5 971#define NX_DEV_NEED_QUISCENT 5
972#define NX_DEV_FAILED 6 972#define NX_DEV_NEED_AER 6
973#define NX_DEV_FAILED 7
973 974
974#define NX_RCODE_DRIVER_INFO 0x20000000 975#define NX_RCODE_DRIVER_INFO 0x20000000
975#define NX_RCODE_DRIVER_CAN_RELOAD 0x40000000 976#define NX_RCODE_DRIVER_CAN_RELOAD 0x40000000
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 85e28e60ecf1..a945591298a8 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
@@ -539,7 +539,7 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
539 struct netxen_adapter *adapter = netdev_priv(netdev); 539 struct netxen_adapter *adapter = netdev_priv(netdev);
540 struct dev_mc_list *mc_ptr; 540 struct dev_mc_list *mc_ptr;
541 u8 null_addr[6]; 541 u8 null_addr[6];
542 int index = 0; 542 int i;
543 543
544 memset(null_addr, 0, 6); 544 memset(null_addr, 0, 6);
545 545
@@ -554,7 +554,7 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
554 return; 554 return;
555 } 555 }
556 556
557 if (netdev->mc_count == 0) { 557 if (netdev_mc_empty(netdev)) {
558 adapter->set_promisc(adapter, 558 adapter->set_promisc(adapter,
559 NETXEN_NIU_NON_PROMISC_MODE); 559 NETXEN_NIU_NON_PROMISC_MODE);
560 netxen_nic_disable_mcast_filter(adapter); 560 netxen_nic_disable_mcast_filter(adapter);
@@ -563,23 +563,20 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
563 563
564 adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE); 564 adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE);
565 if (netdev->flags & IFF_ALLMULTI || 565 if (netdev->flags & IFF_ALLMULTI ||
566 netdev->mc_count > adapter->max_mc_count) { 566 netdev_mc_count(netdev) > adapter->max_mc_count) {
567 netxen_nic_disable_mcast_filter(adapter); 567 netxen_nic_disable_mcast_filter(adapter);
568 return; 568 return;
569 } 569 }
570 570
571 netxen_nic_enable_mcast_filter(adapter); 571 netxen_nic_enable_mcast_filter(adapter);
572 572
573 for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next, index++) 573 i = 0;
574 netxen_nic_set_mcast_addr(adapter, index, mc_ptr->dmi_addr); 574 netdev_for_each_mc_addr(mc_ptr, netdev)
575 575 netxen_nic_set_mcast_addr(adapter, i++, mc_ptr->dmi_addr);
576 if (index != netdev->mc_count)
577 printk(KERN_WARNING "%s: %s multicast address count mismatch\n",
578 netxen_nic_driver_name, netdev->name);
579 576
580 /* Clear out remaining addresses */ 577 /* Clear out remaining addresses */
581 for (; index < adapter->max_mc_count; index++) 578 while (i < adapter->max_mc_count)
582 netxen_nic_set_mcast_addr(adapter, index, null_addr); 579 netxen_nic_set_mcast_addr(adapter, i++, null_addr);
583} 580}
584 581
585static int 582static int
@@ -704,16 +701,14 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
704 } 701 }
705 702
706 if ((netdev->flags & IFF_ALLMULTI) || 703 if ((netdev->flags & IFF_ALLMULTI) ||
707 (netdev->mc_count > adapter->max_mc_count)) { 704 (netdev_mc_count(netdev) > adapter->max_mc_count)) {
708 mode = VPORT_MISS_MODE_ACCEPT_MULTI; 705 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
709 goto send_fw_cmd; 706 goto send_fw_cmd;
710 } 707 }
711 708
712 if (netdev->mc_count > 0) { 709 if (!netdev_mc_empty(netdev)) {
713 for (mc_ptr = netdev->mc_list; mc_ptr; 710 netdev_for_each_mc_addr(mc_ptr, netdev)
714 mc_ptr = mc_ptr->next) {
715 nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, &del_list); 711 nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, &del_list);
716 }
717 } 712 }
718 713
719send_fw_cmd: 714send_fw_cmd:
@@ -777,17 +772,20 @@ int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
777int netxen_config_intr_coalesce(struct netxen_adapter *adapter) 772int netxen_config_intr_coalesce(struct netxen_adapter *adapter)
778{ 773{
779 nx_nic_req_t req; 774 nx_nic_req_t req;
780 u64 word; 775 u64 word[6];
781 int rv; 776 int rv, i;
782 777
783 memset(&req, 0, sizeof(nx_nic_req_t)); 778 memset(&req, 0, sizeof(nx_nic_req_t));
779 memset(word, 0, sizeof(word));
784 780
785 req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); 781 req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
786 782
787 word = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16); 783 word[0] = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
788 req.req_hdr = cpu_to_le64(word); 784 req.req_hdr = cpu_to_le64(word[0]);
789 785
790 memcpy(&req.words[0], &adapter->coal, sizeof(adapter->coal)); 786 memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
787 for (i = 0; i < 6; i++)
788 req.words[i] = cpu_to_le64(word[i]);
791 789
792 rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); 790 rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
793 if (rv != 0) { 791 if (rv != 0) {
@@ -1033,7 +1031,7 @@ static int netxen_get_flash_block(struct netxen_adapter *adapter, int base,
1033 return 0; 1031 return 0;
1034} 1032}
1035 1033
1036int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac) 1034int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac)
1037{ 1035{
1038 __le32 *pmac = (__le32 *) mac; 1036 __le32 *pmac = (__le32 *) mac;
1039 u32 offset; 1037 u32 offset;
@@ -1058,7 +1056,7 @@ int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
1058 return 0; 1056 return 0;
1059} 1057}
1060 1058
1061int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac) 1059int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac)
1062{ 1060{
1063 uint32_t crbaddr, mac_hi, mac_lo; 1061 uint32_t crbaddr, mac_hi, mac_lo;
1064 int pci_func = adapter->ahw.pci_func; 1062 int pci_func = adapter->ahw.pci_func;
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h
index 3fd1dcb3583a..e2c5b6f2df03 100644
--- a/drivers/net/netxen/netxen_nic_hw.h
+++ b/drivers/net/netxen/netxen_nic_hw.h
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 64cff68d372c..1c63610ead42 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
@@ -780,6 +780,9 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
780 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) 780 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
781 return 1; 781 return 1;
782 782
783 if (adapter->need_fw_reset)
784 return 1;
785
783 /* last attempt had failed */ 786 /* last attempt had failed */
784 if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED) 787 if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
785 return 1; 788 return 1;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 24279e6e55f5..08780ef1c1f8 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
@@ -35,6 +35,7 @@
35#include <linux/ipv6.h> 35#include <linux/ipv6.h>
36#include <linux/inetdevice.h> 36#include <linux/inetdevice.h>
37#include <linux/sysfs.h> 37#include <linux/sysfs.h>
38#include <linux/aer.h>
38 39
39MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver"); 40MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
40MODULE_LICENSE("GPL"); 41MODULE_LICENSE("GPL");
@@ -84,6 +85,7 @@ static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
84static void netxen_create_diag_entries(struct netxen_adapter *adapter); 85static void netxen_create_diag_entries(struct netxen_adapter *adapter);
85static void netxen_remove_diag_entries(struct netxen_adapter *adapter); 86static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
86 87
88static int nx_dev_request_aer(struct netxen_adapter *adapter);
87static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter); 89static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
88static int netxen_can_start_firmware(struct netxen_adapter *adapter); 90static int netxen_can_start_firmware(struct netxen_adapter *adapter);
89 91
@@ -98,7 +100,7 @@ static void netxen_config_indev_addr(struct net_device *dev, unsigned long);
98 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \ 100 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
99 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} 101 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
100 102
101static struct pci_device_id netxen_pci_tbl[] __devinitdata = { 103static DEFINE_PCI_DEVICE_TABLE(netxen_pci_tbl) = {
102 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR), 104 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
103 ENTRY(PCI_DEVICE_ID_NX2031_10GCX4), 105 ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
104 ENTRY(PCI_DEVICE_ID_NX2031_4GCU), 106 ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
@@ -430,7 +432,7 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
430{ 432{
431 int i; 433 int i;
432 unsigned char *p; 434 unsigned char *p;
433 __le64 mac_addr; 435 u64 mac_addr;
434 struct net_device *netdev = adapter->netdev; 436 struct net_device *netdev = adapter->netdev;
435 struct pci_dev *pdev = adapter->pdev; 437 struct pci_dev *pdev = adapter->pdev;
436 438
@@ -1262,6 +1264,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1262 if ((err = pci_request_regions(pdev, netxen_nic_driver_name))) 1264 if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
1263 goto err_out_disable_pdev; 1265 goto err_out_disable_pdev;
1264 1266
1267 if (NX_IS_REVISION_P3(pdev->revision))
1268 pci_enable_pcie_error_reporting(pdev);
1269
1265 pci_set_master(pdev); 1270 pci_set_master(pdev);
1266 1271
1267 netdev = alloc_etherdev(sizeof(struct netxen_adapter)); 1272 netdev = alloc_etherdev(sizeof(struct netxen_adapter));
@@ -1409,17 +1414,19 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
1409 1414
1410 netxen_release_firmware(adapter); 1415 netxen_release_firmware(adapter);
1411 1416
1417 if (NX_IS_REVISION_P3(pdev->revision))
1418 pci_disable_pcie_error_reporting(pdev);
1419
1412 pci_release_regions(pdev); 1420 pci_release_regions(pdev);
1413 pci_disable_device(pdev); 1421 pci_disable_device(pdev);
1414 pci_set_drvdata(pdev, NULL); 1422 pci_set_drvdata(pdev, NULL);
1415 1423
1416 free_netdev(netdev); 1424 free_netdev(netdev);
1417} 1425}
1418static int __netxen_nic_shutdown(struct pci_dev *pdev) 1426
1427static void netxen_nic_detach_func(struct netxen_adapter *adapter)
1419{ 1428{
1420 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1421 struct net_device *netdev = adapter->netdev; 1429 struct net_device *netdev = adapter->netdev;
1422 int retval;
1423 1430
1424 netif_device_detach(netdev); 1431 netif_device_detach(netdev);
1425 1432
@@ -1438,53 +1445,22 @@ static int __netxen_nic_shutdown(struct pci_dev *pdev)
1438 nx_decr_dev_ref_cnt(adapter); 1445 nx_decr_dev_ref_cnt(adapter);
1439 1446
1440 clear_bit(__NX_RESETTING, &adapter->state); 1447 clear_bit(__NX_RESETTING, &adapter->state);
1441
1442 retval = pci_save_state(pdev);
1443 if (retval)
1444 return retval;
1445
1446 if (netxen_nic_wol_supported(adapter)) {
1447 pci_enable_wake(pdev, PCI_D3cold, 1);
1448 pci_enable_wake(pdev, PCI_D3hot, 1);
1449 }
1450
1451 pci_disable_device(pdev);
1452
1453 return 0;
1454} 1448}
1455static void netxen_nic_shutdown(struct pci_dev *pdev)
1456{
1457 if (__netxen_nic_shutdown(pdev))
1458 return;
1459}
1460#ifdef CONFIG_PM
1461static int
1462netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
1463{
1464 int retval;
1465
1466 retval = __netxen_nic_shutdown(pdev);
1467 if (retval)
1468 return retval;
1469 1449
1470 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1450static int netxen_nic_attach_func(struct pci_dev *pdev)
1471 return 0;
1472}
1473
1474static int
1475netxen_nic_resume(struct pci_dev *pdev)
1476{ 1451{
1477 struct netxen_adapter *adapter = pci_get_drvdata(pdev); 1452 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1478 struct net_device *netdev = adapter->netdev; 1453 struct net_device *netdev = adapter->netdev;
1479 int err; 1454 int err;
1480 1455
1481 pci_set_power_state(pdev, PCI_D0);
1482 pci_restore_state(pdev);
1483
1484 err = pci_enable_device(pdev); 1456 err = pci_enable_device(pdev);
1485 if (err) 1457 if (err)
1486 return err; 1458 return err;
1487 1459
1460 pci_set_power_state(pdev, PCI_D0);
1461 pci_set_master(pdev);
1462 pci_restore_state(pdev);
1463
1488 adapter->ahw.crb_win = -1; 1464 adapter->ahw.crb_win = -1;
1489 adapter->ahw.ocm_win = -1; 1465 adapter->ahw.ocm_win = -1;
1490 1466
@@ -1503,11 +1479,10 @@ netxen_nic_resume(struct pci_dev *pdev)
1503 if (err) 1479 if (err)
1504 goto err_out_detach; 1480 goto err_out_detach;
1505 1481
1506 netif_device_attach(netdev);
1507
1508 netxen_config_indev_addr(netdev, NETDEV_UP); 1482 netxen_config_indev_addr(netdev, NETDEV_UP);
1509 } 1483 }
1510 1484
1485 netif_device_attach(netdev);
1511 netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); 1486 netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
1512 return 0; 1487 return 0;
1513 1488
@@ -1517,6 +1492,85 @@ err_out:
1517 nx_decr_dev_ref_cnt(adapter); 1492 nx_decr_dev_ref_cnt(adapter);
1518 return err; 1493 return err;
1519} 1494}
1495
1496static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev,
1497 pci_channel_state_t state)
1498{
1499 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1500
1501 if (state == pci_channel_io_perm_failure)
1502 return PCI_ERS_RESULT_DISCONNECT;
1503
1504 if (nx_dev_request_aer(adapter))
1505 return PCI_ERS_RESULT_RECOVERED;
1506
1507 netxen_nic_detach_func(adapter);
1508
1509 pci_disable_device(pdev);
1510
1511 return PCI_ERS_RESULT_NEED_RESET;
1512}
1513
1514static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev)
1515{
1516 int err = 0;
1517
1518 err = netxen_nic_attach_func(pdev);
1519
1520 return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
1521}
1522
1523static void netxen_io_resume(struct pci_dev *pdev)
1524{
1525 pci_cleanup_aer_uncorrect_error_status(pdev);
1526}
1527
1528static void netxen_nic_shutdown(struct pci_dev *pdev)
1529{
1530 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1531
1532 netxen_nic_detach_func(adapter);
1533
1534 if (pci_save_state(pdev))
1535 return;
1536
1537 if (netxen_nic_wol_supported(adapter)) {
1538 pci_enable_wake(pdev, PCI_D3cold, 1);
1539 pci_enable_wake(pdev, PCI_D3hot, 1);
1540 }
1541
1542 pci_disable_device(pdev);
1543}
1544
1545#ifdef CONFIG_PM
1546static int
1547netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
1548{
1549 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1550 int retval;
1551
1552 netxen_nic_detach_func(adapter);
1553
1554 retval = pci_save_state(pdev);
1555 if (retval)
1556 return retval;
1557
1558 if (netxen_nic_wol_supported(adapter)) {
1559 pci_enable_wake(pdev, PCI_D3cold, 1);
1560 pci_enable_wake(pdev, PCI_D3hot, 1);
1561 }
1562
1563 pci_disable_device(pdev);
1564 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1565
1566 return 0;
1567}
1568
1569static int
1570netxen_nic_resume(struct pci_dev *pdev)
1571{
1572 return netxen_nic_attach_func(pdev);
1573}
1520#endif 1574#endif
1521 1575
1522static int netxen_nic_open(struct net_device *netdev) 1576static int netxen_nic_open(struct net_device *netdev)
@@ -2104,20 +2158,49 @@ nx_decr_dev_ref_cnt(struct netxen_adapter *adapter)
2104 return count; 2158 return count;
2105} 2159}
2106 2160
2107static void 2161static int
2162nx_dev_request_aer(struct netxen_adapter *adapter)
2163{
2164 u32 state;
2165 int ret = -EINVAL;
2166
2167 if (netxen_api_lock(adapter))
2168 return ret;
2169
2170 state = NXRD32(adapter, NX_CRB_DEV_STATE);
2171
2172 if (state == NX_DEV_NEED_AER)
2173 ret = 0;
2174 else if (state == NX_DEV_READY) {
2175 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER);
2176 ret = 0;
2177 }
2178
2179 netxen_api_unlock(adapter);
2180 return ret;
2181}
2182
2183static int
2108nx_dev_request_reset(struct netxen_adapter *adapter) 2184nx_dev_request_reset(struct netxen_adapter *adapter)
2109{ 2185{
2110 u32 state; 2186 u32 state;
2187 int ret = -EINVAL;
2111 2188
2112 if (netxen_api_lock(adapter)) 2189 if (netxen_api_lock(adapter))
2113 return; 2190 return ret;
2114 2191
2115 state = NXRD32(adapter, NX_CRB_DEV_STATE); 2192 state = NXRD32(adapter, NX_CRB_DEV_STATE);
2116 2193
2117 if (state != NX_DEV_INITALIZING) 2194 if (state == NX_DEV_NEED_RESET)
2195 ret = 0;
2196 else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) {
2118 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET); 2197 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET);
2198 ret = 0;
2199 }
2119 2200
2120 netxen_api_unlock(adapter); 2201 netxen_api_unlock(adapter);
2202
2203 return ret;
2121} 2204}
2122 2205
2123static int 2206static int
@@ -2271,17 +2354,29 @@ netxen_check_health(struct netxen_adapter *adapter)
2271 u32 state, heartbit; 2354 u32 state, heartbit;
2272 struct net_device *netdev = adapter->netdev; 2355 struct net_device *netdev = adapter->netdev;
2273 2356
2357 state = NXRD32(adapter, NX_CRB_DEV_STATE);
2358 if (state == NX_DEV_NEED_AER)
2359 return 0;
2360
2274 if (netxen_nic_check_temp(adapter)) 2361 if (netxen_nic_check_temp(adapter))
2275 goto detach; 2362 goto detach;
2276 2363
2277 if (adapter->need_fw_reset) { 2364 if (adapter->need_fw_reset) {
2278 nx_dev_request_reset(adapter); 2365 if (nx_dev_request_reset(adapter))
2366 return 0;
2279 goto detach; 2367 goto detach;
2280 } 2368 }
2281 2369
2282 state = NXRD32(adapter, NX_CRB_DEV_STATE); 2370 /* NX_DEV_NEED_RESET, this state can be marked in two cases
2283 if (state == NX_DEV_NEED_RESET) 2371 * 1. Tx timeout 2. Fw hang
2284 goto detach; 2372 * Send request to destroy context in case of tx timeout only
2373 * and doesn't required in case of Fw hang
2374 */
2375 if (state == NX_DEV_NEED_RESET) {
2376 adapter->need_fw_reset = 1;
2377 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
2378 goto detach;
2379 }
2285 2380
2286 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) 2381 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
2287 return 0; 2382 return 0;
@@ -2290,12 +2385,17 @@ netxen_check_health(struct netxen_adapter *adapter)
2290 if (heartbit != adapter->heartbit) { 2385 if (heartbit != adapter->heartbit) {
2291 adapter->heartbit = heartbit; 2386 adapter->heartbit = heartbit;
2292 adapter->fw_fail_cnt = 0; 2387 adapter->fw_fail_cnt = 0;
2388 if (adapter->need_fw_reset)
2389 goto detach;
2293 return 0; 2390 return 0;
2294 } 2391 }
2295 2392
2296 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH) 2393 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2297 return 0; 2394 return 0;
2298 2395
2396 if (nx_dev_request_reset(adapter))
2397 return 0;
2398
2299 clear_bit(__NX_FW_ATTACHED, &adapter->state); 2399 clear_bit(__NX_FW_ATTACHED, &adapter->state);
2300 2400
2301 dev_info(&netdev->dev, "firmware hang detected\n"); 2401 dev_info(&netdev->dev, "firmware hang detected\n");
@@ -2498,7 +2598,7 @@ netxen_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
2498 return size; 2598 return size;
2499} 2599}
2500 2600
2501ssize_t netxen_sysfs_write_mem(struct kobject *kobj, 2601static ssize_t netxen_sysfs_write_mem(struct kobject *kobj,
2502 struct bin_attribute *attr, char *buf, 2602 struct bin_attribute *attr, char *buf,
2503 loff_t offset, size_t size) 2603 loff_t offset, size_t size)
2504{ 2604{
@@ -2725,6 +2825,12 @@ netxen_config_indev_addr(struct net_device *dev, unsigned long event)
2725{ } 2825{ }
2726#endif 2826#endif
2727 2827
2828static struct pci_error_handlers netxen_err_handler = {
2829 .error_detected = netxen_io_error_detected,
2830 .slot_reset = netxen_io_slot_reset,
2831 .resume = netxen_io_resume,
2832};
2833
2728static struct pci_driver netxen_driver = { 2834static struct pci_driver netxen_driver = {
2729 .name = netxen_nic_driver_name, 2835 .name = netxen_nic_driver_name,
2730 .id_table = netxen_pci_tbl, 2836 .id_table = netxen_pci_tbl,
@@ -2734,7 +2840,8 @@ static struct pci_driver netxen_driver = {
2734 .suspend = netxen_nic_suspend, 2840 .suspend = netxen_nic_suspend,
2735 .resume = netxen_nic_resume, 2841 .resume = netxen_nic_resume,
2736#endif 2842#endif
2737 .shutdown = netxen_nic_shutdown 2843 .shutdown = netxen_nic_shutdown,
2844 .err_handler = &netxen_err_handler
2738}; 2845};
2739 2846
2740static int __init netxen_init_module(void) 2847static int __init netxen_init_module(void)
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index 6a87d810e59d..c16cbfb4061b 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -651,7 +651,8 @@ static void ni5010_set_multicast_list(struct net_device *dev)
651 651
652 PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name)); 652 PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name));
653 653
654 if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI || dev->mc_list) { 654 if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI ||
655 !netdev_mc_empty(dev)) {
655 outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */ 656 outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */
656 PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name)); 657 PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name));
657 } else { 658 } else {
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index b42f5e522f90..05c29c2cef2a 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -596,8 +596,8 @@ static int init586(struct net_device *dev)
596 struct iasetup_cmd_struct __iomem *ias_cmd; 596 struct iasetup_cmd_struct __iomem *ias_cmd;
597 struct tdr_cmd_struct __iomem *tdr_cmd; 597 struct tdr_cmd_struct __iomem *tdr_cmd;
598 struct mcsetup_cmd_struct __iomem *mc_cmd; 598 struct mcsetup_cmd_struct __iomem *mc_cmd;
599 struct dev_mc_list *dmi = dev->mc_list; 599 struct dev_mc_list *dmi;
600 int num_addrs = dev->mc_count; 600 int num_addrs = netdev_mc_count(dev);
601 601
602 ptr = p->scb + 1; 602 ptr = p->scb + 1;
603 603
@@ -724,9 +724,9 @@ static int init586(struct net_device *dev)
724 writew(0xffff, &mc_cmd->cmd_link); 724 writew(0xffff, &mc_cmd->cmd_link);
725 writew(num_addrs * 6, &mc_cmd->mc_cnt); 725 writew(num_addrs * 6, &mc_cmd->mc_cnt);
726 726
727 for (i = 0; i < num_addrs; i++, dmi = dmi->next) 727 i = 0;
728 memcpy_toio(mc_cmd->mc_list[i], 728 netdev_for_each_mc_addr(dmi, dev)
729 dmi->dmi_addr, 6); 729 memcpy_toio(mc_cmd->mc_list[i++], dmi->dmi_addr, 6);
730 730
731 writew(make16(mc_cmd), &p->scb->cbl_offset); 731 writew(make16(mc_cmd), &p->scb->cbl_offset);
732 writeb(CUC_START, &p->scb->cmd_cuc); 732 writeb(CUC_START, &p->scb->cmd_cuc);
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index ae19aafd2c7e..9225c76cac40 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -849,7 +849,7 @@ static int ni65_lance_reinit(struct net_device *dev)
849 849
850 if(dev->flags & IFF_PROMISC) 850 if(dev->flags & IFF_PROMISC)
851 ni65_init_lance(p,dev->dev_addr,0x00,M_PROM); 851 ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
852 else if(dev->mc_count || dev->flags & IFF_ALLMULTI) 852 else if (netdev_mc_count(dev) || dev->flags & IFF_ALLMULTI)
853 ni65_init_lance(p,dev->dev_addr,0xff,0x0); 853 ni65_init_lance(p,dev->dev_addr,0xff,0x0);
854 else 854 else
855 ni65_init_lance(p,dev->dev_addr,0x00,0x00); 855 ni65_init_lance(p,dev->dev_addr,0x00,0x00);
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 2aed2b382c40..0678f3106cbc 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3,6 +3,8 @@
3 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net) 3 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
4 */ 4 */
5 5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
6#include <linux/module.h> 8#include <linux/module.h>
7#include <linux/init.h> 9#include <linux/init.h>
8#include <linux/pci.h> 10#include <linux/pci.h>
@@ -33,7 +35,6 @@
33#include "niu.h" 35#include "niu.h"
34 36
35#define DRV_MODULE_NAME "niu" 37#define DRV_MODULE_NAME "niu"
36#define PFX DRV_MODULE_NAME ": "
37#define DRV_MODULE_VERSION "1.0" 38#define DRV_MODULE_VERSION "1.0"
38#define DRV_MODULE_RELDATE "Nov 14, 2008" 39#define DRV_MODULE_RELDATE "Nov 14, 2008"
39 40
@@ -58,7 +59,7 @@ static void writeq(u64 val, void __iomem *reg)
58} 59}
59#endif 60#endif
60 61
61static struct pci_device_id niu_pci_tbl[] = { 62static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = {
62 {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)}, 63 {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
63 {} 64 {}
64}; 65};
@@ -89,21 +90,6 @@ static int debug = -1;
89module_param(debug, int, 0); 90module_param(debug, int, 0);
90MODULE_PARM_DESC(debug, "NIU debug level"); 91MODULE_PARM_DESC(debug, "NIU debug level");
91 92
92#define niudbg(TYPE, f, a...) \
93do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \
94 printk(KERN_DEBUG PFX f, ## a); \
95} while (0)
96
97#define niuinfo(TYPE, f, a...) \
98do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \
99 printk(KERN_INFO PFX f, ## a); \
100} while (0)
101
102#define niuwarn(TYPE, f, a...) \
103do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \
104 printk(KERN_WARNING PFX f, ## a); \
105} while (0)
106
107#define niu_lock_parent(np, flags) \ 93#define niu_lock_parent(np, flags) \
108 spin_lock_irqsave(&np->parent->lock, flags) 94 spin_lock_irqsave(&np->parent->lock, flags)
109#define niu_unlock_parent(np, flags) \ 95#define niu_unlock_parent(np, flags) \
@@ -135,10 +121,9 @@ static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
135 nw64_mac(reg, bits); 121 nw64_mac(reg, bits);
136 err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay); 122 err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
137 if (err) 123 if (err)
138 dev_err(np->device, PFX "%s: bits (%llx) of register %s " 124 netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
139 "would not clear, val[%llx]\n", 125 (unsigned long long)bits, reg_name,
140 np->dev->name, (unsigned long long) bits, reg_name, 126 (unsigned long long)nr64_mac(reg));
141 (unsigned long long) nr64_mac(reg));
142 return err; 127 return err;
143} 128}
144 129
@@ -175,10 +160,9 @@ static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
175 160
176 err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay); 161 err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
177 if (err) 162 if (err)
178 dev_err(np->device, PFX "%s: bits (%llx) of register %s " 163 netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
179 "would not clear, val[%llx]\n", 164 (unsigned long long)bits, reg_name,
180 np->dev->name, (unsigned long long) bits, reg_name, 165 (unsigned long long)nr64_ipp(reg));
181 (unsigned long long) nr64_ipp(reg));
182 return err; 166 return err;
183} 167}
184 168
@@ -216,10 +200,9 @@ static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
216 nw64(reg, bits); 200 nw64(reg, bits);
217 err = __niu_wait_bits_clear(np, reg, bits, limit, delay); 201 err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
218 if (err) 202 if (err)
219 dev_err(np->device, PFX "%s: bits (%llx) of register %s " 203 netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
220 "would not clear, val[%llx]\n", 204 (unsigned long long)bits, reg_name,
221 np->dev->name, (unsigned long long) bits, reg_name, 205 (unsigned long long)nr64(reg));
222 (unsigned long long) nr64(reg));
223 return err; 206 return err;
224} 207}
225 208
@@ -475,9 +458,8 @@ static int serdes_init_niu_1g_serdes(struct niu *np)
475 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 458 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
476 ESR2_TI_PLL_CFG_L, pll_cfg); 459 ESR2_TI_PLL_CFG_L, pll_cfg);
477 if (err) { 460 if (err) {
478 dev_err(np->device, PFX "NIU Port %d " 461 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
479 "serdes_init_niu_1g_serdes: " 462 np->port, __func__);
480 "mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
481 return err; 463 return err;
482 } 464 }
483 465
@@ -486,9 +468,8 @@ static int serdes_init_niu_1g_serdes(struct niu *np)
486 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 468 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
487 ESR2_TI_PLL_STS_L, pll_sts); 469 ESR2_TI_PLL_STS_L, pll_sts);
488 if (err) { 470 if (err) {
489 dev_err(np->device, PFX "NIU Port %d " 471 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
490 "serdes_init_niu_1g_serdes: " 472 np->port, __func__);
491 "mdio write to ESR2_TI_PLL_STS_L failed", np->port);
492 return err; 473 return err;
493 } 474 }
494 475
@@ -531,8 +512,8 @@ static int serdes_init_niu_1g_serdes(struct niu *np)
531 } 512 }
532 513
533 if ((sig & mask) != val) { 514 if ((sig & mask) != val) {
534 dev_err(np->device, PFX "Port %u signal bits [%08x] are not " 515 netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
535 "[%08x]\n", np->port, (int) (sig & mask), (int) val); 516 np->port, (int)(sig & mask), (int)val);
536 return -ENODEV; 517 return -ENODEV;
537 } 518 }
538 519
@@ -569,9 +550,8 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
569 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 550 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
570 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff); 551 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
571 if (err) { 552 if (err) {
572 dev_err(np->device, PFX "NIU Port %d " 553 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
573 "serdes_init_niu_10g_serdes: " 554 np->port, __func__);
574 "mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
575 return err; 555 return err;
576 } 556 }
577 557
@@ -580,9 +560,8 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
580 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 560 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
581 ESR2_TI_PLL_STS_L, pll_sts & 0xffff); 561 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
582 if (err) { 562 if (err) {
583 dev_err(np->device, PFX "NIU Port %d " 563 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
584 "serdes_init_niu_10g_serdes: " 564 np->port, __func__);
585 "mdio write to ESR2_TI_PLL_STS_L failed", np->port);
586 return err; 565 return err;
587 } 566 }
588 567
@@ -639,9 +618,8 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
639 } 618 }
640 619
641 if ((sig & mask) != val) { 620 if ((sig & mask) != val) {
642 pr_info(PFX "NIU Port %u signal bits [%08x] are not " 621 pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
643 "[%08x] for 10G...trying 1G\n", 622 np->port, (int)(sig & mask), (int)val);
644 np->port, (int) (sig & mask), (int) val);
645 623
646 /* 10G failed, try initializing at 1G */ 624 /* 10G failed, try initializing at 1G */
647 err = serdes_init_niu_1g_serdes(np); 625 err = serdes_init_niu_1g_serdes(np);
@@ -649,8 +627,8 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
649 np->flags &= ~NIU_FLAGS_10G; 627 np->flags &= ~NIU_FLAGS_10G;
650 np->mac_xcvr = MAC_XCVR_PCS; 628 np->mac_xcvr = MAC_XCVR_PCS;
651 } else { 629 } else {
652 dev_err(np->device, PFX "Port %u 10G/1G SERDES " 630 netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
653 "Link Failed \n", np->port); 631 np->port);
654 return -ENODEV; 632 return -ENODEV;
655 } 633 }
656 } 634 }
@@ -764,9 +742,8 @@ static int esr_reset(struct niu *np)
764 if (err) 742 if (err)
765 return err; 743 return err;
766 if (reset != 0) { 744 if (reset != 0) {
767 dev_err(np->device, PFX "Port %u ESR_RESET " 745 netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
768 "did not clear [%08x]\n", 746 np->port, reset);
769 np->port, reset);
770 return -ENODEV; 747 return -ENODEV;
771 } 748 }
772 749
@@ -890,8 +867,8 @@ static int serdes_init_10g(struct niu *np)
890 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; 867 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
891 return 0; 868 return 0;
892 } 869 }
893 dev_err(np->device, PFX "Port %u signal bits [%08x] are not " 870 netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
894 "[%08x]\n", np->port, (int) (sig & mask), (int) val); 871 np->port, (int)(sig & mask), (int)val);
895 return -ENODEV; 872 return -ENODEV;
896 } 873 }
897 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) 874 if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
@@ -1039,8 +1016,8 @@ static int serdes_init_1g_serdes(struct niu *np)
1039 } 1016 }
1040 1017
1041 if ((sig & mask) != val) { 1018 if ((sig & mask) != val) {
1042 dev_err(np->device, PFX "Port %u signal bits [%08x] are not " 1019 netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
1043 "[%08x]\n", np->port, (int) (sig & mask), (int) val); 1020 np->port, (int)(sig & mask), (int)val);
1044 return -ENODEV; 1021 return -ENODEV;
1045 } 1022 }
1046 1023
@@ -1332,8 +1309,8 @@ static int bcm8704_reset(struct niu *np)
1332 break; 1309 break;
1333 } 1310 }
1334 if (limit < 0) { 1311 if (limit < 0) {
1335 dev_err(np->device, PFX "Port %u PHY will not reset " 1312 netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
1336 "(bmcr=%04x)\n", np->port, (err & 0xffff)); 1313 np->port, (err & 0xffff));
1337 return -ENODEV; 1314 return -ENODEV;
1338 } 1315 }
1339 return 0; 1316 return 0;
@@ -1515,21 +1492,18 @@ static int xcvr_diag_bcm870x(struct niu *np)
1515 MII_STAT1000); 1492 MII_STAT1000);
1516 if (err < 0) 1493 if (err < 0)
1517 return err; 1494 return err;
1518 pr_info(PFX "Port %u PMA_PMD(MII_STAT1000) [%04x]\n", 1495 pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);
1519 np->port, err);
1520 1496
1521 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20); 1497 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
1522 if (err < 0) 1498 if (err < 0)
1523 return err; 1499 return err;
1524 pr_info(PFX "Port %u USER_DEV3(0x20) [%04x]\n", 1500 pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);
1525 np->port, err);
1526 1501
1527 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, 1502 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
1528 MII_NWAYTEST); 1503 MII_NWAYTEST);
1529 if (err < 0) 1504 if (err < 0)
1530 return err; 1505 return err;
1531 pr_info(PFX "Port %u PHYXS(MII_NWAYTEST) [%04x]\n", 1506 pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
1532 np->port, err);
1533#endif 1507#endif
1534 1508
1535 /* XXX dig this out it might not be so useful XXX */ 1509 /* XXX dig this out it might not be so useful XXX */
@@ -1555,11 +1529,11 @@ static int xcvr_diag_bcm870x(struct niu *np)
1555 1529
1556 if (analog_stat0 != 0x03fc) { 1530 if (analog_stat0 != 0x03fc) {
1557 if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) { 1531 if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
1558 pr_info(PFX "Port %u cable not connected " 1532 pr_info("Port %u cable not connected or bad cable\n",
1559 "or bad cable.\n", np->port); 1533 np->port);
1560 } else if (analog_stat0 == 0x639c) { 1534 } else if (analog_stat0 == 0x639c) {
1561 pr_info(PFX "Port %u optical module is bad " 1535 pr_info("Port %u optical module is bad or missing\n",
1562 "or missing.\n", np->port); 1536 np->port);
1563 } 1537 }
1564 } 1538 }
1565 1539
@@ -1699,8 +1673,8 @@ static int mii_reset(struct niu *np)
1699 break; 1673 break;
1700 } 1674 }
1701 if (limit < 0) { 1675 if (limit < 0) {
1702 dev_err(np->device, PFX "Port %u MII would not reset, " 1676 netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
1703 "bmcr[%04x]\n", np->port, err); 1677 np->port, err);
1704 return -ENODEV; 1678 return -ENODEV;
1705 } 1679 }
1706 1680
@@ -1895,7 +1869,7 @@ static int mii_init_common(struct niu *np)
1895 return err; 1869 return err;
1896 bmsr = err; 1870 bmsr = err;
1897 1871
1898 pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n", 1872 pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
1899 np->port, bmcr, bmsr); 1873 np->port, bmcr, bmsr);
1900#endif 1874#endif
1901 1875
@@ -1948,16 +1922,12 @@ static int niu_link_status_common(struct niu *np, int link_up)
1948 unsigned long flags; 1922 unsigned long flags;
1949 1923
1950 if (!netif_carrier_ok(dev) && link_up) { 1924 if (!netif_carrier_ok(dev) && link_up) {
1951 niuinfo(LINK, "%s: Link is up at %s, %s duplex\n", 1925 netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
1952 dev->name, 1926 lp->active_speed == SPEED_10000 ? "10Gb/sec" :
1953 (lp->active_speed == SPEED_10000 ? 1927 lp->active_speed == SPEED_1000 ? "1Gb/sec" :
1954 "10Gb/sec" : 1928 lp->active_speed == SPEED_100 ? "100Mbit/sec" :
1955 (lp->active_speed == SPEED_1000 ? 1929 "10Mbit/sec",
1956 "1Gb/sec" : 1930 lp->active_duplex == DUPLEX_FULL ? "full" : "half");
1957 (lp->active_speed == SPEED_100 ?
1958 "100Mbit/sec" : "10Mbit/sec"))),
1959 (lp->active_duplex == DUPLEX_FULL ?
1960 "full" : "half"));
1961 1931
1962 spin_lock_irqsave(&np->lock, flags); 1932 spin_lock_irqsave(&np->lock, flags);
1963 niu_init_xif(np); 1933 niu_init_xif(np);
@@ -1966,7 +1936,7 @@ static int niu_link_status_common(struct niu *np, int link_up)
1966 1936
1967 netif_carrier_on(dev); 1937 netif_carrier_on(dev);
1968 } else if (netif_carrier_ok(dev) && !link_up) { 1938 } else if (netif_carrier_ok(dev) && !link_up) {
1969 niuwarn(LINK, "%s: Link is down\n", dev->name); 1939 netif_warn(np, link, dev, "Link is down\n");
1970 spin_lock_irqsave(&np->lock, flags); 1940 spin_lock_irqsave(&np->lock, flags);
1971 niu_handle_led(np, 0); 1941 niu_handle_led(np, 0);
1972 spin_unlock_irqrestore(&np->lock, flags); 1942 spin_unlock_irqrestore(&np->lock, flags);
@@ -2232,8 +2202,8 @@ static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
2232 } else { 2202 } else {
2233 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; 2203 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2234 *link_up_p = 0; 2204 *link_up_p = 0;
2235 niuwarn(LINK, "%s: Hotplug PHY Removed\n", 2205 netif_warn(np, link, np->dev,
2236 np->dev->name); 2206 "Hotplug PHY Removed\n");
2237 } 2207 }
2238 } 2208 }
2239out: 2209out:
@@ -2531,8 +2501,8 @@ static int serdes_init_10g_serdes(struct niu *np)
2531 np->flags &= ~NIU_FLAGS_10G; 2501 np->flags &= ~NIU_FLAGS_10G;
2532 np->mac_xcvr = MAC_XCVR_PCS; 2502 np->mac_xcvr = MAC_XCVR_PCS;
2533 } else { 2503 } else {
2534 dev_err(np->device, PFX "Port %u 10G/1G SERDES Link Failed \n", 2504 netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
2535 np->port); 2505 np->port);
2536 return -ENODEV; 2506 return -ENODEV;
2537 } 2507 }
2538 } 2508 }
@@ -3234,23 +3204,22 @@ static int fflp_early_init(struct niu *np)
3234 parent = np->parent; 3204 parent = np->parent;
3235 err = 0; 3205 err = 0;
3236 if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) { 3206 if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
3237 niudbg(PROBE, "fflp_early_init: Initting hw on port %u\n",
3238 np->port);
3239 if (np->parent->plat_type != PLAT_TYPE_NIU) { 3207 if (np->parent->plat_type != PLAT_TYPE_NIU) {
3240 fflp_reset(np); 3208 fflp_reset(np);
3241 fflp_set_timings(np); 3209 fflp_set_timings(np);
3242 err = fflp_disable_all_partitions(np); 3210 err = fflp_disable_all_partitions(np);
3243 if (err) { 3211 if (err) {
3244 niudbg(PROBE, "fflp_disable_all_partitions " 3212 netif_printk(np, probe, KERN_DEBUG, np->dev,
3245 "failed, err=%d\n", err); 3213 "fflp_disable_all_partitions failed, err=%d\n",
3214 err);
3246 goto out; 3215 goto out;
3247 } 3216 }
3248 } 3217 }
3249 3218
3250 err = tcam_early_init(np); 3219 err = tcam_early_init(np);
3251 if (err) { 3220 if (err) {
3252 niudbg(PROBE, "tcam_early_init failed, err=%d\n", 3221 netif_printk(np, probe, KERN_DEBUG, np->dev,
3253 err); 3222 "tcam_early_init failed, err=%d\n", err);
3254 goto out; 3223 goto out;
3255 } 3224 }
3256 fflp_llcsnap_enable(np, 1); 3225 fflp_llcsnap_enable(np, 1);
@@ -3260,22 +3229,22 @@ static int fflp_early_init(struct niu *np)
3260 3229
3261 err = tcam_flush_all(np); 3230 err = tcam_flush_all(np);
3262 if (err) { 3231 if (err) {
3263 niudbg(PROBE, "tcam_flush_all failed, err=%d\n", 3232 netif_printk(np, probe, KERN_DEBUG, np->dev,
3264 err); 3233 "tcam_flush_all failed, err=%d\n", err);
3265 goto out; 3234 goto out;
3266 } 3235 }
3267 if (np->parent->plat_type != PLAT_TYPE_NIU) { 3236 if (np->parent->plat_type != PLAT_TYPE_NIU) {
3268 err = fflp_hash_clear(np); 3237 err = fflp_hash_clear(np);
3269 if (err) { 3238 if (err) {
3270 niudbg(PROBE, "fflp_hash_clear failed, " 3239 netif_printk(np, probe, KERN_DEBUG, np->dev,
3271 "err=%d\n", err); 3240 "fflp_hash_clear failed, err=%d\n",
3241 err);
3272 goto out; 3242 goto out;
3273 } 3243 }
3274 } 3244 }
3275 3245
3276 vlan_tbl_clear(np); 3246 vlan_tbl_clear(np);
3277 3247
3278 niudbg(PROBE, "fflp_early_init: Success\n");
3279 parent->flags |= PARENT_FLGS_CLS_HWINIT; 3248 parent->flags |= PARENT_FLGS_CLS_HWINIT;
3280 } 3249 }
3281out: 3250out:
@@ -3665,8 +3634,8 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
3665 3634
3666 cons = rp->cons; 3635 cons = rp->cons;
3667 3636
3668 niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n", 3637 netif_printk(np, tx_done, KERN_DEBUG, np->dev,
3669 np->dev->name, pkt_cnt, cons); 3638 "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
3670 3639
3671 while (pkt_cnt--) 3640 while (pkt_cnt--)
3672 cons = release_tx_packet(np, rp, cons); 3641 cons = release_tx_packet(np, rp, cons);
@@ -3714,11 +3683,12 @@ static inline void niu_sync_rx_discard_stats(struct niu *np,
3714 rp->rx_errors += misc & RXMISC_COUNT; 3683 rp->rx_errors += misc & RXMISC_COUNT;
3715 3684
3716 if (unlikely(misc & RXMISC_OFLOW)) 3685 if (unlikely(misc & RXMISC_OFLOW))
3717 dev_err(np->device, "rx-%d: Counter overflow " 3686 dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
3718 "RXMISC discard\n", rx_channel); 3687 rx_channel);
3719 3688
3720 niudbg(RX_ERR, "%s-rx-%d: MISC drop=%u over=%u\n", 3689 netif_printk(np, rx_err, KERN_DEBUG, np->dev,
3721 np->dev->name, rx_channel, misc, misc-limit); 3690 "rx-%d: MISC drop=%u over=%u\n",
3691 rx_channel, misc, misc-limit);
3722 } 3692 }
3723 3693
3724 /* WRED (Weighted Random Early Discard) by hardware */ 3694 /* WRED (Weighted Random Early Discard) by hardware */
@@ -3728,11 +3698,11 @@ static inline void niu_sync_rx_discard_stats(struct niu *np,
3728 rp->rx_dropped += wred & RED_DIS_CNT_COUNT; 3698 rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
3729 3699
3730 if (unlikely(wred & RED_DIS_CNT_OFLOW)) 3700 if (unlikely(wred & RED_DIS_CNT_OFLOW))
3731 dev_err(np->device, "rx-%d: Counter overflow " 3701 dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);
3732 "WRED discard\n", rx_channel);
3733 3702
3734 niudbg(RX_ERR, "%s-rx-%d: WRED drop=%u over=%u\n", 3703 netif_printk(np, rx_err, KERN_DEBUG, np->dev,
3735 np->dev->name, rx_channel, wred, wred-limit); 3704 "rx-%d: WRED drop=%u over=%u\n",
3705 rx_channel, wred, wred-limit);
3736 } 3706 }
3737} 3707}
3738 3708
@@ -3753,8 +3723,9 @@ static int niu_rx_work(struct napi_struct *napi, struct niu *np,
3753 mbox->rx_dma_ctl_stat = 0; 3723 mbox->rx_dma_ctl_stat = 0;
3754 mbox->rcrstat_a = 0; 3724 mbox->rcrstat_a = 0;
3755 3725
3756 niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n", 3726 netif_printk(np, rx_status, KERN_DEBUG, np->dev,
3757 np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen); 3727 "%s(chan[%d]), stat[%llx] qlen=%d\n",
3728 __func__, rp->rx_channel, (unsigned long long)stat, qlen);
3758 3729
3759 rcr_done = work_done = 0; 3730 rcr_done = work_done = 0;
3760 qlen = min(qlen, budget); 3731 qlen = min(qlen, budget);
@@ -3791,8 +3762,8 @@ static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
3791 u32 rx_vec = (v0 & 0xffffffff); 3762 u32 rx_vec = (v0 & 0xffffffff);
3792 int i, work_done = 0; 3763 int i, work_done = 0;
3793 3764
3794 niudbg(INTR, "%s: niu_poll_core() v0[%016llx]\n", 3765 netif_printk(np, intr, KERN_DEBUG, np->dev,
3795 np->dev->name, (unsigned long long) v0); 3766 "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);
3796 3767
3797 for (i = 0; i < np->num_tx_rings; i++) { 3768 for (i = 0; i < np->num_tx_rings; i++) {
3798 struct tx_ring_info *rp = &np->tx_rings[i]; 3769 struct tx_ring_info *rp = &np->tx_rings[i];
@@ -3837,39 +3808,38 @@ static int niu_poll(struct napi_struct *napi, int budget)
3837static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp, 3808static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
3838 u64 stat) 3809 u64 stat)
3839{ 3810{
3840 dev_err(np->device, PFX "%s: RX channel %u errors ( ", 3811 netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
3841 np->dev->name, rp->rx_channel);
3842 3812
3843 if (stat & RX_DMA_CTL_STAT_RBR_TMOUT) 3813 if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
3844 printk("RBR_TMOUT "); 3814 pr_cont("RBR_TMOUT ");
3845 if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR) 3815 if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
3846 printk("RSP_CNT "); 3816 pr_cont("RSP_CNT ");
3847 if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS) 3817 if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
3848 printk("BYTE_EN_BUS "); 3818 pr_cont("BYTE_EN_BUS ");
3849 if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR) 3819 if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
3850 printk("RSP_DAT "); 3820 pr_cont("RSP_DAT ");
3851 if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR) 3821 if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
3852 printk("RCR_ACK "); 3822 pr_cont("RCR_ACK ");
3853 if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR) 3823 if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
3854 printk("RCR_SHA_PAR "); 3824 pr_cont("RCR_SHA_PAR ");
3855 if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR) 3825 if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
3856 printk("RBR_PRE_PAR "); 3826 pr_cont("RBR_PRE_PAR ");
3857 if (stat & RX_DMA_CTL_STAT_CONFIG_ERR) 3827 if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
3858 printk("CONFIG "); 3828 pr_cont("CONFIG ");
3859 if (stat & RX_DMA_CTL_STAT_RCRINCON) 3829 if (stat & RX_DMA_CTL_STAT_RCRINCON)
3860 printk("RCRINCON "); 3830 pr_cont("RCRINCON ");
3861 if (stat & RX_DMA_CTL_STAT_RCRFULL) 3831 if (stat & RX_DMA_CTL_STAT_RCRFULL)
3862 printk("RCRFULL "); 3832 pr_cont("RCRFULL ");
3863 if (stat & RX_DMA_CTL_STAT_RBRFULL) 3833 if (stat & RX_DMA_CTL_STAT_RBRFULL)
3864 printk("RBRFULL "); 3834 pr_cont("RBRFULL ");
3865 if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE) 3835 if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
3866 printk("RBRLOGPAGE "); 3836 pr_cont("RBRLOGPAGE ");
3867 if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE) 3837 if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
3868 printk("CFIGLOGPAGE "); 3838 pr_cont("CFIGLOGPAGE ");
3869 if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR) 3839 if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
3870 printk("DC_FIDO "); 3840 pr_cont("DC_FIDO ");
3871 3841
3872 printk(")\n"); 3842 pr_cont(")\n");
3873} 3843}
3874 3844
3875static int niu_rx_error(struct niu *np, struct rx_ring_info *rp) 3845static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
@@ -3883,9 +3853,9 @@ static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
3883 err = -EINVAL; 3853 err = -EINVAL;
3884 3854
3885 if (err) { 3855 if (err) {
3886 dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n", 3856 netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
3887 np->dev->name, rp->rx_channel, 3857 rp->rx_channel,
3888 (unsigned long long) stat); 3858 (unsigned long long) stat);
3889 3859
3890 niu_log_rxchan_errors(np, rp, stat); 3860 niu_log_rxchan_errors(np, rp, stat);
3891 } 3861 }
@@ -3899,27 +3869,26 @@ static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
3899static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp, 3869static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
3900 u64 cs) 3870 u64 cs)
3901{ 3871{
3902 dev_err(np->device, PFX "%s: TX channel %u errors ( ", 3872 netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
3903 np->dev->name, rp->tx_channel);
3904 3873
3905 if (cs & TX_CS_MBOX_ERR) 3874 if (cs & TX_CS_MBOX_ERR)
3906 printk("MBOX "); 3875 pr_cont("MBOX ");
3907 if (cs & TX_CS_PKT_SIZE_ERR) 3876 if (cs & TX_CS_PKT_SIZE_ERR)
3908 printk("PKT_SIZE "); 3877 pr_cont("PKT_SIZE ");
3909 if (cs & TX_CS_TX_RING_OFLOW) 3878 if (cs & TX_CS_TX_RING_OFLOW)
3910 printk("TX_RING_OFLOW "); 3879 pr_cont("TX_RING_OFLOW ");
3911 if (cs & TX_CS_PREF_BUF_PAR_ERR) 3880 if (cs & TX_CS_PREF_BUF_PAR_ERR)
3912 printk("PREF_BUF_PAR "); 3881 pr_cont("PREF_BUF_PAR ");
3913 if (cs & TX_CS_NACK_PREF) 3882 if (cs & TX_CS_NACK_PREF)
3914 printk("NACK_PREF "); 3883 pr_cont("NACK_PREF ");
3915 if (cs & TX_CS_NACK_PKT_RD) 3884 if (cs & TX_CS_NACK_PKT_RD)
3916 printk("NACK_PKT_RD "); 3885 pr_cont("NACK_PKT_RD ");
3917 if (cs & TX_CS_CONF_PART_ERR) 3886 if (cs & TX_CS_CONF_PART_ERR)
3918 printk("CONF_PART "); 3887 pr_cont("CONF_PART ");
3919 if (cs & TX_CS_PKT_PRT_ERR) 3888 if (cs & TX_CS_PKT_PRT_ERR)
3920 printk("PKT_PTR "); 3889 pr_cont("PKT_PTR ");
3921 3890
3922 printk(")\n"); 3891 pr_cont(")\n");
3923} 3892}
3924 3893
3925static int niu_tx_error(struct niu *np, struct tx_ring_info *rp) 3894static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
@@ -3930,12 +3899,11 @@ static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
3930 logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel)); 3899 logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
3931 logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel)); 3900 logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
3932 3901
3933 dev_err(np->device, PFX "%s: TX channel %u error, " 3902 netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
3934 "cs[%llx] logh[%llx] logl[%llx]\n", 3903 rp->tx_channel,
3935 np->dev->name, rp->tx_channel, 3904 (unsigned long long)cs,
3936 (unsigned long long) cs, 3905 (unsigned long long)logh,
3937 (unsigned long long) logh, 3906 (unsigned long long)logl);
3938 (unsigned long long) logl);
3939 3907
3940 niu_log_txchan_errors(np, rp, cs); 3908 niu_log_txchan_errors(np, rp, cs);
3941 3909
@@ -3954,9 +3922,8 @@ static int niu_mif_interrupt(struct niu *np)
3954 phy_mdint = 1; 3922 phy_mdint = 1;
3955 } 3923 }
3956 3924
3957 dev_err(np->device, PFX "%s: MIF interrupt, " 3925 netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
3958 "stat[%llx] phy_mdint(%d)\n", 3926 (unsigned long long)mif_status, phy_mdint);
3959 np->dev->name, (unsigned long long) mif_status, phy_mdint);
3960 3927
3961 return -ENODEV; 3928 return -ENODEV;
3962} 3929}
@@ -4081,41 +4048,40 @@ static int niu_mac_interrupt(struct niu *np)
4081 4048
4082static void niu_log_device_error(struct niu *np, u64 stat) 4049static void niu_log_device_error(struct niu *np, u64 stat)
4083{ 4050{
4084 dev_err(np->device, PFX "%s: Core device errors ( ", 4051 netdev_err(np->dev, "Core device errors ( ");
4085 np->dev->name);
4086 4052
4087 if (stat & SYS_ERR_MASK_META2) 4053 if (stat & SYS_ERR_MASK_META2)
4088 printk("META2 "); 4054 pr_cont("META2 ");
4089 if (stat & SYS_ERR_MASK_META1) 4055 if (stat & SYS_ERR_MASK_META1)
4090 printk("META1 "); 4056 pr_cont("META1 ");
4091 if (stat & SYS_ERR_MASK_PEU) 4057 if (stat & SYS_ERR_MASK_PEU)
4092 printk("PEU "); 4058 pr_cont("PEU ");
4093 if (stat & SYS_ERR_MASK_TXC) 4059 if (stat & SYS_ERR_MASK_TXC)
4094 printk("TXC "); 4060 pr_cont("TXC ");
4095 if (stat & SYS_ERR_MASK_RDMC) 4061 if (stat & SYS_ERR_MASK_RDMC)
4096 printk("RDMC "); 4062 pr_cont("RDMC ");
4097 if (stat & SYS_ERR_MASK_TDMC) 4063 if (stat & SYS_ERR_MASK_TDMC)
4098 printk("TDMC "); 4064 pr_cont("TDMC ");
4099 if (stat & SYS_ERR_MASK_ZCP) 4065 if (stat & SYS_ERR_MASK_ZCP)
4100 printk("ZCP "); 4066 pr_cont("ZCP ");
4101 if (stat & SYS_ERR_MASK_FFLP) 4067 if (stat & SYS_ERR_MASK_FFLP)
4102 printk("FFLP "); 4068 pr_cont("FFLP ");
4103 if (stat & SYS_ERR_MASK_IPP) 4069 if (stat & SYS_ERR_MASK_IPP)
4104 printk("IPP "); 4070 pr_cont("IPP ");
4105 if (stat & SYS_ERR_MASK_MAC) 4071 if (stat & SYS_ERR_MASK_MAC)
4106 printk("MAC "); 4072 pr_cont("MAC ");
4107 if (stat & SYS_ERR_MASK_SMX) 4073 if (stat & SYS_ERR_MASK_SMX)
4108 printk("SMX "); 4074 pr_cont("SMX ");
4109 4075
4110 printk(")\n"); 4076 pr_cont(")\n");
4111} 4077}
4112 4078
4113static int niu_device_error(struct niu *np) 4079static int niu_device_error(struct niu *np)
4114{ 4080{
4115 u64 stat = nr64(SYS_ERR_STAT); 4081 u64 stat = nr64(SYS_ERR_STAT);
4116 4082
4117 dev_err(np->device, PFX "%s: Core device error, stat[%llx]\n", 4083 netdev_err(np->dev, "Core device error, stat[%llx]\n",
4118 np->dev->name, (unsigned long long) stat); 4084 (unsigned long long)stat);
4119 4085
4120 niu_log_device_error(np, stat); 4086 niu_log_device_error(np, stat);
4121 4087
@@ -4197,8 +4163,8 @@ static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
4197 RX_DMA_CTL_STAT_RCRTO); 4163 RX_DMA_CTL_STAT_RCRTO);
4198 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write); 4164 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
4199 4165
4200 niudbg(INTR, "%s: rxchan_intr stat[%llx]\n", 4166 netif_printk(np, intr, KERN_DEBUG, np->dev,
4201 np->dev->name, (unsigned long long) stat); 4167 "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
4202} 4168}
4203 4169
4204static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp, 4170static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
@@ -4206,8 +4172,8 @@ static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
4206{ 4172{
4207 rp->tx_cs = nr64(TX_CS(rp->tx_channel)); 4173 rp->tx_cs = nr64(TX_CS(rp->tx_channel));
4208 4174
4209 niudbg(INTR, "%s: txchan_intr cs[%llx]\n", 4175 netif_printk(np, intr, KERN_DEBUG, np->dev,
4210 np->dev->name, (unsigned long long) rp->tx_cs); 4176 "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
4211} 4177}
4212 4178
4213static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0) 4179static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
@@ -4265,8 +4231,8 @@ static irqreturn_t niu_interrupt(int irq, void *dev_id)
4265 u64 v0, v1, v2; 4231 u64 v0, v1, v2;
4266 4232
4267 if (netif_msg_intr(np)) 4233 if (netif_msg_intr(np))
4268 printk(KERN_DEBUG PFX "niu_interrupt() ldg[%p](%d) ", 4234 printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)",
4269 lp, ldg); 4235 __func__, lp, ldg);
4270 4236
4271 spin_lock_irqsave(&np->lock, flags); 4237 spin_lock_irqsave(&np->lock, flags);
4272 4238
@@ -4275,7 +4241,7 @@ static irqreturn_t niu_interrupt(int irq, void *dev_id)
4275 v2 = nr64(LDSV2(ldg)); 4241 v2 = nr64(LDSV2(ldg));
4276 4242
4277 if (netif_msg_intr(np)) 4243 if (netif_msg_intr(np))
4278 printk("v0[%llx] v1[%llx] v2[%llx]\n", 4244 pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
4279 (unsigned long long) v0, 4245 (unsigned long long) v0,
4280 (unsigned long long) v1, 4246 (unsigned long long) v1,
4281 (unsigned long long) v2); 4247 (unsigned long long) v2);
@@ -4400,8 +4366,8 @@ static int niu_alloc_rx_ring_info(struct niu *np,
4400 if (!rp->mbox) 4366 if (!rp->mbox)
4401 return -ENOMEM; 4367 return -ENOMEM;
4402 if ((unsigned long)rp->mbox & (64UL - 1)) { 4368 if ((unsigned long)rp->mbox & (64UL - 1)) {
4403 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " 4369 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
4404 "RXDMA mailbox %p\n", np->dev->name, rp->mbox); 4370 rp->mbox);
4405 return -EINVAL; 4371 return -EINVAL;
4406 } 4372 }
4407 4373
@@ -4411,8 +4377,8 @@ static int niu_alloc_rx_ring_info(struct niu *np,
4411 if (!rp->rcr) 4377 if (!rp->rcr)
4412 return -ENOMEM; 4378 return -ENOMEM;
4413 if ((unsigned long)rp->rcr & (64UL - 1)) { 4379 if ((unsigned long)rp->rcr & (64UL - 1)) {
4414 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " 4380 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
4415 "RXDMA RCR table %p\n", np->dev->name, rp->rcr); 4381 rp->rcr);
4416 return -EINVAL; 4382 return -EINVAL;
4417 } 4383 }
4418 rp->rcr_table_size = MAX_RCR_RING_SIZE; 4384 rp->rcr_table_size = MAX_RCR_RING_SIZE;
@@ -4424,8 +4390,8 @@ static int niu_alloc_rx_ring_info(struct niu *np,
4424 if (!rp->rbr) 4390 if (!rp->rbr)
4425 return -ENOMEM; 4391 return -ENOMEM;
4426 if ((unsigned long)rp->rbr & (64UL - 1)) { 4392 if ((unsigned long)rp->rbr & (64UL - 1)) {
4427 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " 4393 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
4428 "RXDMA RBR table %p\n", np->dev->name, rp->rbr); 4394 rp->rbr);
4429 return -EINVAL; 4395 return -EINVAL;
4430 } 4396 }
4431 rp->rbr_table_size = MAX_RBR_RING_SIZE; 4397 rp->rbr_table_size = MAX_RBR_RING_SIZE;
@@ -4458,8 +4424,8 @@ static int niu_alloc_tx_ring_info(struct niu *np,
4458 if (!rp->mbox) 4424 if (!rp->mbox)
4459 return -ENOMEM; 4425 return -ENOMEM;
4460 if ((unsigned long)rp->mbox & (64UL - 1)) { 4426 if ((unsigned long)rp->mbox & (64UL - 1)) {
4461 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " 4427 netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
4462 "TXDMA mailbox %p\n", np->dev->name, rp->mbox); 4428 rp->mbox);
4463 return -EINVAL; 4429 return -EINVAL;
4464 } 4430 }
4465 4431
@@ -4469,8 +4435,8 @@ static int niu_alloc_tx_ring_info(struct niu *np,
4469 if (!rp->descr) 4435 if (!rp->descr)
4470 return -ENOMEM; 4436 return -ENOMEM;
4471 if ((unsigned long)rp->descr & (64UL - 1)) { 4437 if ((unsigned long)rp->descr & (64UL - 1)) {
4472 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " 4438 netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
4473 "TXDMA descr table %p\n", np->dev->name, rp->descr); 4439 rp->descr);
4474 return -EINVAL; 4440 return -EINVAL;
4475 } 4441 }
4476 4442
@@ -4726,10 +4692,8 @@ static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
4726 4692
4727 if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE | 4693 if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
4728 TX_RNG_CFIG_STADDR)) { 4694 TX_RNG_CFIG_STADDR)) {
4729 dev_err(np->device, PFX "%s: TX ring channel %d " 4695 netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
4730 "DMA addr (%llx) is not aligned.\n", 4696 channel, (unsigned long long)rp->descr_dma);
4731 np->dev->name, channel,
4732 (unsigned long long) rp->descr_dma);
4733 return -EINVAL; 4697 return -EINVAL;
4734 } 4698 }
4735 4699
@@ -4746,10 +4710,8 @@ static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
4746 4710
4747 if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) || 4711 if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
4748 ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) { 4712 ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
4749 dev_err(np->device, PFX "%s: TX ring channel %d " 4713 netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
4750 "MBOX addr (%llx) is has illegal bits.\n", 4714 channel, (unsigned long long)rp->mbox_dma);
4751 np->dev->name, channel,
4752 (unsigned long long) rp->mbox_dma);
4753 return -EINVAL; 4715 return -EINVAL;
4754 } 4716 }
4755 nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32); 4717 nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
@@ -5146,9 +5108,8 @@ static int niu_zcp_read(struct niu *np, int index, u64 *data)
5146 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 5108 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
5147 1000, 100); 5109 1000, 100);
5148 if (err) { 5110 if (err) {
5149 dev_err(np->device, PFX "%s: ZCP read busy won't clear, " 5111 netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
5150 "ZCP_RAM_ACC[%llx]\n", np->dev->name, 5112 (unsigned long long)nr64(ZCP_RAM_ACC));
5151 (unsigned long long) nr64(ZCP_RAM_ACC));
5152 return err; 5113 return err;
5153 } 5114 }
5154 5115
@@ -5160,9 +5121,8 @@ static int niu_zcp_read(struct niu *np, int index, u64 *data)
5160 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 5121 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
5161 1000, 100); 5122 1000, 100);
5162 if (err) { 5123 if (err) {
5163 dev_err(np->device, PFX "%s: ZCP read busy2 won't clear, " 5124 netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
5164 "ZCP_RAM_ACC[%llx]\n", np->dev->name, 5125 (unsigned long long)nr64(ZCP_RAM_ACC));
5165 (unsigned long long) nr64(ZCP_RAM_ACC));
5166 return err; 5126 return err;
5167 } 5127 }
5168 5128
@@ -5527,8 +5487,7 @@ static int niu_reset_tx_bmac(struct niu *np)
5527 udelay(100); 5487 udelay(100);
5528 } 5488 }
5529 if (limit < 0) { 5489 if (limit < 0) {
5530 dev_err(np->device, PFX "Port %u TX BMAC would not reset, " 5490 dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
5531 "BTXMAC_SW_RST[%llx]\n",
5532 np->port, 5491 np->port,
5533 (unsigned long long) nr64_mac(BTXMAC_SW_RST)); 5492 (unsigned long long) nr64_mac(BTXMAC_SW_RST));
5534 return -ENODEV; 5493 return -ENODEV;
@@ -5629,12 +5588,11 @@ static int niu_reset_rx_xmac(struct niu *np)
5629 while (--limit >= 0) { 5588 while (--limit >= 0) {
5630 if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS | 5589 if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
5631 XRXMAC_SW_RST_SOFT_RST))) 5590 XRXMAC_SW_RST_SOFT_RST)))
5632 break; 5591 break;
5633 udelay(100); 5592 udelay(100);
5634 } 5593 }
5635 if (limit < 0) { 5594 if (limit < 0) {
5636 dev_err(np->device, PFX "Port %u RX XMAC would not reset, " 5595 dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
5637 "XRXMAC_SW_RST[%llx]\n",
5638 np->port, 5596 np->port,
5639 (unsigned long long) nr64_mac(XRXMAC_SW_RST)); 5597 (unsigned long long) nr64_mac(XRXMAC_SW_RST));
5640 return -ENODEV; 5598 return -ENODEV;
@@ -5655,8 +5613,7 @@ static int niu_reset_rx_bmac(struct niu *np)
5655 udelay(100); 5613 udelay(100);
5656 } 5614 }
5657 if (limit < 0) { 5615 if (limit < 0) {
5658 dev_err(np->device, PFX "Port %u RX BMAC would not reset, " 5616 dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
5659 "BRXMAC_SW_RST[%llx]\n",
5660 np->port, 5617 np->port,
5661 (unsigned long long) nr64_mac(BRXMAC_SW_RST)); 5618 (unsigned long long) nr64_mac(BRXMAC_SW_RST));
5662 return -ENODEV; 5619 return -ENODEV;
@@ -5960,11 +5917,9 @@ static void niu_disable_ipp(struct niu *np)
5960 } 5917 }
5961 if (limit < 0 && 5918 if (limit < 0 &&
5962 (rd != 0 && wr != 1)) { 5919 (rd != 0 && wr != 1)) {
5963 dev_err(np->device, PFX "%s: IPP would not quiesce, " 5920 netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
5964 "rd_ptr[%llx] wr_ptr[%llx]\n", 5921 (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
5965 np->dev->name, 5922 (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
5966 (unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR),
5967 (unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR));
5968 } 5923 }
5969 5924
5970 val = nr64_ipp(IPP_CFIG); 5925 val = nr64_ipp(IPP_CFIG);
@@ -5981,12 +5936,12 @@ static int niu_init_hw(struct niu *np)
5981{ 5936{
5982 int i, err; 5937 int i, err;
5983 5938
5984 niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name); 5939 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
5985 niu_txc_enable_port(np, 1); 5940 niu_txc_enable_port(np, 1);
5986 niu_txc_port_dma_enable(np, 1); 5941 niu_txc_port_dma_enable(np, 1);
5987 niu_txc_set_imask(np, 0); 5942 niu_txc_set_imask(np, 0);
5988 5943
5989 niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name); 5944 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
5990 for (i = 0; i < np->num_tx_rings; i++) { 5945 for (i = 0; i < np->num_tx_rings; i++) {
5991 struct tx_ring_info *rp = &np->tx_rings[i]; 5946 struct tx_ring_info *rp = &np->tx_rings[i];
5992 5947
@@ -5995,27 +5950,27 @@ static int niu_init_hw(struct niu *np)
5995 return err; 5950 return err;
5996 } 5951 }
5997 5952
5998 niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name); 5953 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
5999 err = niu_init_rx_channels(np); 5954 err = niu_init_rx_channels(np);
6000 if (err) 5955 if (err)
6001 goto out_uninit_tx_channels; 5956 goto out_uninit_tx_channels;
6002 5957
6003 niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name); 5958 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
6004 err = niu_init_classifier_hw(np); 5959 err = niu_init_classifier_hw(np);
6005 if (err) 5960 if (err)
6006 goto out_uninit_rx_channels; 5961 goto out_uninit_rx_channels;
6007 5962
6008 niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name); 5963 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
6009 err = niu_init_zcp(np); 5964 err = niu_init_zcp(np);
6010 if (err) 5965 if (err)
6011 goto out_uninit_rx_channels; 5966 goto out_uninit_rx_channels;
6012 5967
6013 niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name); 5968 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
6014 err = niu_init_ipp(np); 5969 err = niu_init_ipp(np);
6015 if (err) 5970 if (err)
6016 goto out_uninit_rx_channels; 5971 goto out_uninit_rx_channels;
6017 5972
6018 niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name); 5973 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
6019 err = niu_init_mac(np); 5974 err = niu_init_mac(np);
6020 if (err) 5975 if (err)
6021 goto out_uninit_ipp; 5976 goto out_uninit_ipp;
@@ -6023,16 +5978,16 @@ static int niu_init_hw(struct niu *np)
6023 return 0; 5978 return 0;
6024 5979
6025out_uninit_ipp: 5980out_uninit_ipp:
6026 niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name); 5981 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
6027 niu_disable_ipp(np); 5982 niu_disable_ipp(np);
6028 5983
6029out_uninit_rx_channels: 5984out_uninit_rx_channels:
6030 niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name); 5985 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
6031 niu_stop_rx_channels(np); 5986 niu_stop_rx_channels(np);
6032 niu_reset_rx_channels(np); 5987 niu_reset_rx_channels(np);
6033 5988
6034out_uninit_tx_channels: 5989out_uninit_tx_channels:
6035 niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name); 5990 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
6036 niu_stop_tx_channels(np); 5991 niu_stop_tx_channels(np);
6037 niu_reset_tx_channels(np); 5992 niu_reset_tx_channels(np);
6038 5993
@@ -6041,25 +5996,25 @@ out_uninit_tx_channels:
6041 5996
6042static void niu_stop_hw(struct niu *np) 5997static void niu_stop_hw(struct niu *np)
6043{ 5998{
6044 niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name); 5999 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
6045 niu_enable_interrupts(np, 0); 6000 niu_enable_interrupts(np, 0);
6046 6001
6047 niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name); 6002 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
6048 niu_enable_rx_mac(np, 0); 6003 niu_enable_rx_mac(np, 0);
6049 6004
6050 niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name); 6005 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
6051 niu_disable_ipp(np); 6006 niu_disable_ipp(np);
6052 6007
6053 niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name); 6008 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
6054 niu_stop_tx_channels(np); 6009 niu_stop_tx_channels(np);
6055 6010
6056 niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name); 6011 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
6057 niu_stop_rx_channels(np); 6012 niu_stop_rx_channels(np);
6058 6013
6059 niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name); 6014 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
6060 niu_reset_tx_channels(np); 6015 niu_reset_tx_channels(np);
6061 6016
6062 niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name); 6017 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
6063 niu_reset_rx_channels(np); 6018 niu_reset_rx_channels(np);
6064} 6019}
6065 6020
@@ -6369,10 +6324,10 @@ static void niu_set_rx_mode(struct net_device *dev)
6369 np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC); 6324 np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
6370 if (dev->flags & IFF_PROMISC) 6325 if (dev->flags & IFF_PROMISC)
6371 np->flags |= NIU_FLAGS_PROMISC; 6326 np->flags |= NIU_FLAGS_PROMISC;
6372 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0)) 6327 if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
6373 np->flags |= NIU_FLAGS_MCAST; 6328 np->flags |= NIU_FLAGS_MCAST;
6374 6329
6375 alt_cnt = dev->uc.count; 6330 alt_cnt = netdev_uc_count(dev);
6376 if (alt_cnt > niu_num_alt_addr(np)) { 6331 if (alt_cnt > niu_num_alt_addr(np)) {
6377 alt_cnt = 0; 6332 alt_cnt = 0;
6378 np->flags |= NIU_FLAGS_PROMISC; 6333 np->flags |= NIU_FLAGS_PROMISC;
@@ -6381,17 +6336,15 @@ static void niu_set_rx_mode(struct net_device *dev)
6381 if (alt_cnt) { 6336 if (alt_cnt) {
6382 int index = 0; 6337 int index = 0;
6383 6338
6384 list_for_each_entry(ha, &dev->uc.list, list) { 6339 netdev_for_each_uc_addr(ha, dev) {
6385 err = niu_set_alt_mac(np, index, ha->addr); 6340 err = niu_set_alt_mac(np, index, ha->addr);
6386 if (err) 6341 if (err)
6387 printk(KERN_WARNING PFX "%s: Error %d " 6342 netdev_warn(dev, "Error %d adding alt mac %d\n",
6388 "adding alt mac %d\n", 6343 err, index);
6389 dev->name, err, index);
6390 err = niu_enable_alt_mac(np, index, 1); 6344 err = niu_enable_alt_mac(np, index, 1);
6391 if (err) 6345 if (err)
6392 printk(KERN_WARNING PFX "%s: Error %d " 6346 netdev_warn(dev, "Error %d enabling alt mac %d\n",
6393 "enabling alt mac %d\n", 6347 err, index);
6394 dev->name, err, index);
6395 6348
6396 index++; 6349 index++;
6397 } 6350 }
@@ -6404,16 +6357,15 @@ static void niu_set_rx_mode(struct net_device *dev)
6404 for (i = alt_start; i < niu_num_alt_addr(np); i++) { 6357 for (i = alt_start; i < niu_num_alt_addr(np); i++) {
6405 err = niu_enable_alt_mac(np, i, 0); 6358 err = niu_enable_alt_mac(np, i, 0);
6406 if (err) 6359 if (err)
6407 printk(KERN_WARNING PFX "%s: Error %d " 6360 netdev_warn(dev, "Error %d disabling alt mac %d\n",
6408 "disabling alt mac %d\n", 6361 err, i);
6409 dev->name, err, i);
6410 } 6362 }
6411 } 6363 }
6412 if (dev->flags & IFF_ALLMULTI) { 6364 if (dev->flags & IFF_ALLMULTI) {
6413 for (i = 0; i < 16; i++) 6365 for (i = 0; i < 16; i++)
6414 hash[i] = 0xffff; 6366 hash[i] = 0xffff;
6415 } else if (dev->mc_count > 0) { 6367 } else if (!netdev_mc_empty(dev)) {
6416 for (addr = dev->mc_list; addr; addr = addr->next) { 6368 netdev_for_each_mc_addr(addr, dev) {
6417 u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr); 6369 u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);
6418 6370
6419 crc >>= 24; 6371 crc >>= 24;
@@ -6570,7 +6522,7 @@ static void niu_tx_timeout(struct net_device *dev)
6570{ 6522{
6571 struct niu *np = netdev_priv(dev); 6523 struct niu *np = netdev_priv(dev);
6572 6524
6573 dev_err(np->device, PFX "%s: Transmit timed out, resetting\n", 6525 dev_err(np->device, "%s: Transmit timed out, resetting\n",
6574 dev->name); 6526 dev->name);
6575 6527
6576 schedule_work(&np->reset_task); 6528 schedule_work(&np->reset_task);
@@ -6672,8 +6624,7 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
6672 6624
6673 if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) { 6625 if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
6674 netif_tx_stop_queue(txq); 6626 netif_tx_stop_queue(txq);
6675 dev_err(np->device, PFX "%s: BUG! Tx ring full when " 6627 dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
6676 "queue awake!\n", dev->name);
6677 rp->tx_errors++; 6628 rp->tx_errors++;
6678 return NETDEV_TX_BUSY; 6629 return NETDEV_TX_BUSY;
6679 } 6630 }
@@ -7237,8 +7188,8 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
7237 7188
7238 tp = &parent->tcam[idx]; 7189 tp = &parent->tcam[idx];
7239 if (!tp->valid) { 7190 if (!tp->valid) {
7240 pr_info(PFX "niu%d: %s entry [%d] invalid for idx[%d]\n", 7191 netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
7241 parent->index, np->dev->name, (u16)nfc->fs.location, idx); 7192 parent->index, (u16)nfc->fs.location, idx);
7242 return -EINVAL; 7193 return -EINVAL;
7243 } 7194 }
7244 7195
@@ -7248,8 +7199,8 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
7248 ret = niu_class_to_ethflow(class, &fsp->flow_type); 7199 ret = niu_class_to_ethflow(class, &fsp->flow_type);
7249 7200
7250 if (ret < 0) { 7201 if (ret < 0) {
7251 pr_info(PFX "niu%d: %s niu_class_to_ethflow failed\n", 7202 netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
7252 parent->index, np->dev->name); 7203 parent->index);
7253 ret = -EINVAL; 7204 ret = -EINVAL;
7254 goto out; 7205 goto out;
7255 } 7206 }
@@ -7332,9 +7283,8 @@ static int niu_get_ethtool_tcam_all(struct niu *np,
7332 7283
7333 if (n_entries != cnt) { 7284 if (n_entries != cnt) {
7334 /* print warning, this should not happen */ 7285 /* print warning, this should not happen */
7335 pr_info(PFX "niu%d: %s In niu_get_ethtool_tcam_all, " 7286 netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n",
7336 "n_entries[%d] != cnt[%d]!!!\n\n", 7287 np->parent->index, __func__, n_entries, cnt);
7337 np->parent->index, np->dev->name, n_entries, cnt);
7338 } 7288 }
7339 7289
7340 return 0; 7290 return 0;
@@ -7561,9 +7511,8 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
7561 } 7511 }
7562 } 7512 }
7563 if (!add_usr_cls) { 7513 if (!add_usr_cls) {
7564 pr_info(PFX "niu%d: %s niu_add_ethtool_tcam_entry: " 7514 netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
7565 "Could not find/insert class for pid %d\n", 7515 parent->index, __func__, uspec->proto);
7566 parent->index, np->dev->name, uspec->proto);
7567 ret = -EINVAL; 7516 ret = -EINVAL;
7568 goto out; 7517 goto out;
7569 } 7518 }
@@ -7596,9 +7545,8 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
7596 case AH_V6_FLOW: 7545 case AH_V6_FLOW:
7597 case ESP_V6_FLOW: 7546 case ESP_V6_FLOW:
7598 /* Not yet implemented */ 7547 /* Not yet implemented */
7599 pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: " 7548 netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
7600 "flow %d for IPv6 not implemented\n\n", 7549 parent->index, __func__, fsp->flow_type);
7601 parent->index, np->dev->name, fsp->flow_type);
7602 ret = -EINVAL; 7550 ret = -EINVAL;
7603 goto out; 7551 goto out;
7604 case IP_USER_FLOW: 7552 case IP_USER_FLOW:
@@ -7607,17 +7555,15 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
7607 class); 7555 class);
7608 } else { 7556 } else {
7609 /* Not yet implemented */ 7557 /* Not yet implemented */
7610 pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: " 7558 netdev_info(np->dev, "niu%d: In %s(): usr flow for IPv6 not implemented\n",
7611 "usr flow for IPv6 not implemented\n\n", 7559 parent->index, __func__);
7612 parent->index, np->dev->name);
7613 ret = -EINVAL; 7560 ret = -EINVAL;
7614 goto out; 7561 goto out;
7615 } 7562 }
7616 break; 7563 break;
7617 default: 7564 default:
7618 pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: " 7565 netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
7619 "Unknown flow type %d\n\n", 7566 parent->index, __func__, fsp->flow_type);
7620 parent->index, np->dev->name, fsp->flow_type);
7621 ret = -EINVAL; 7567 ret = -EINVAL;
7622 goto out; 7568 goto out;
7623 } 7569 }
@@ -7627,10 +7573,9 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
7627 tp->assoc_data = TCAM_ASSOCDATA_DISC; 7573 tp->assoc_data = TCAM_ASSOCDATA_DISC;
7628 } else { 7574 } else {
7629 if (fsp->ring_cookie >= np->num_rx_rings) { 7575 if (fsp->ring_cookie >= np->num_rx_rings) {
7630 pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: " 7576 netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
7631 "Invalid RX ring %lld\n\n", 7577 parent->index, __func__,
7632 parent->index, np->dev->name, 7578 (long long)fsp->ring_cookie);
7633 (long long) fsp->ring_cookie);
7634 ret = -EINVAL; 7579 ret = -EINVAL;
7635 goto out; 7580 goto out;
7636 } 7581 }
@@ -7699,10 +7644,9 @@ static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
7699 } 7644 }
7700 } 7645 }
7701 if (i == NIU_L3_PROG_CLS) { 7646 if (i == NIU_L3_PROG_CLS) {
7702 pr_info(PFX "niu%d: %s In niu_del_ethtool_tcam_entry," 7647 netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
7703 "Usr class 0x%llx not found \n", 7648 parent->index, __func__,
7704 parent->index, np->dev->name, 7649 (unsigned long long)class);
7705 (unsigned long long) class);
7706 ret = -EINVAL; 7650 ret = -EINVAL;
7707 goto out; 7651 goto out;
7708 } 7652 }
@@ -8001,9 +7945,7 @@ static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
8001 * won't get any interrupts and that's painful to debug. 7945 * won't get any interrupts and that's painful to debug.
8002 */ 7946 */
8003 if (nr64(LDG_NUM(ldn)) != ldg) { 7947 if (nr64(LDG_NUM(ldn)) != ldg) {
8004 dev_err(np->device, PFX "Port %u, mis-matched " 7948 dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
8005 "LDG assignment "
8006 "for ldn %d, should be %d is %llu\n",
8007 np->port, ldn, ldg, 7949 np->port, ldn, ldg,
8008 (unsigned long long) nr64(LDG_NUM(ldn))); 7950 (unsigned long long) nr64(LDG_NUM(ldn)));
8009 return -EINVAL; 7951 return -EINVAL;
@@ -8056,7 +7998,7 @@ static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
8056 break; 7998 break;
8057 } while (limit--); 7999 } while (limit--);
8058 if (!(frame & ESPC_PIO_STAT_READ_END)) { 8000 if (!(frame & ESPC_PIO_STAT_READ_END)) {
8059 dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n", 8001 dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
8060 (unsigned long long) frame); 8002 (unsigned long long) frame);
8061 return -ENODEV; 8003 return -ENODEV;
8062 } 8004 }
@@ -8071,7 +8013,7 @@ static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
8071 break; 8013 break;
8072 } while (limit--); 8014 } while (limit--);
8073 if (!(frame & ESPC_PIO_STAT_READ_END)) { 8015 if (!(frame & ESPC_PIO_STAT_READ_END)) {
8074 dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n", 8016 dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
8075 (unsigned long long) frame); 8017 (unsigned long long) frame);
8076 return -ENODEV; 8018 return -ENODEV;
8077 } 8019 }
@@ -8152,8 +8094,9 @@ static void __devinit niu_vpd_parse_version(struct niu *np)
8152 s += i + 5; 8094 s += i + 5;
8153 sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor); 8095 sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
8154 8096
8155 niudbg(PROBE, "VPD_SCAN: FCODE major(%d) minor(%d)\n", 8097 netif_printk(np, probe, KERN_DEBUG, np->dev,
8156 vpd->fcode_major, vpd->fcode_minor); 8098 "VPD_SCAN: FCODE major(%d) minor(%d)\n",
8099 vpd->fcode_major, vpd->fcode_minor);
8157 if (vpd->fcode_major > NIU_VPD_MIN_MAJOR || 8100 if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
8158 (vpd->fcode_major == NIU_VPD_MIN_MAJOR && 8101 (vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
8159 vpd->fcode_minor >= NIU_VPD_MIN_MINOR)) 8102 vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
@@ -8173,8 +8116,8 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np,
8173#define FOUND_MASK_PHY 0x00000020 8116#define FOUND_MASK_PHY 0x00000020
8174#define FOUND_MASK_ALL 0x0000003f 8117#define FOUND_MASK_ALL 0x0000003f
8175 8118
8176 niudbg(PROBE, "VPD_SCAN: start[%x] end[%x]\n", 8119 netif_printk(np, probe, KERN_DEBUG, np->dev,
8177 start, end); 8120 "VPD_SCAN: start[%x] end[%x]\n", start, end);
8178 while (start < end) { 8121 while (start < end) {
8179 int len, err, instance, type, prop_len; 8122 int len, err, instance, type, prop_len;
8180 char namebuf[64]; 8123 char namebuf[64];
@@ -8228,8 +8171,7 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np,
8228 } 8171 }
8229 8172
8230 if (max_len && prop_len > max_len) { 8173 if (max_len && prop_len > max_len) {
8231 dev_err(np->device, PFX "Property '%s' length (%d) is " 8174 dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
8232 "too long.\n", namebuf, prop_len);
8233 return -EINVAL; 8175 return -EINVAL;
8234 } 8176 }
8235 8177
@@ -8237,8 +8179,9 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np,
8237 u32 off = start + 5 + err; 8179 u32 off = start + 5 + err;
8238 int i; 8180 int i;
8239 8181
8240 niudbg(PROBE, "VPD_SCAN: Reading in property [%s] " 8182 netif_printk(np, probe, KERN_DEBUG, np->dev,
8241 "len[%d]\n", namebuf, prop_len); 8183 "VPD_SCAN: Reading in property [%s] len[%d]\n",
8184 namebuf, prop_len);
8242 for (i = 0; i < prop_len; i++) 8185 for (i = 0; i < prop_len; i++)
8243 *prop_buf++ = niu_pci_eeprom_read(np, off + i); 8186 *prop_buf++ = niu_pci_eeprom_read(np, off + i);
8244 } 8187 }
@@ -8402,8 +8345,7 @@ static void __devinit niu_pci_vpd_validate(struct niu *np)
8402 u8 val8; 8345 u8 val8;
8403 8346
8404 if (!is_valid_ether_addr(&vpd->local_mac[0])) { 8347 if (!is_valid_ether_addr(&vpd->local_mac[0])) {
8405 dev_err(np->device, PFX "VPD MAC invalid, " 8348 dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");
8406 "falling back to SPROM.\n");
8407 8349
8408 np->flags &= ~NIU_FLAGS_VPD_VALID; 8350 np->flags &= ~NIU_FLAGS_VPD_VALID;
8409 return; 8351 return;
@@ -8420,14 +8362,14 @@ static void __devinit niu_pci_vpd_validate(struct niu *np)
8420 np->flags &= ~NIU_FLAGS_10G; 8362 np->flags &= ~NIU_FLAGS_10G;
8421 } 8363 }
8422 if (np->flags & NIU_FLAGS_10G) 8364 if (np->flags & NIU_FLAGS_10G)
8423 np->mac_xcvr = MAC_XCVR_XPCS; 8365 np->mac_xcvr = MAC_XCVR_XPCS;
8424 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { 8366 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
8425 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | 8367 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
8426 NIU_FLAGS_HOTPLUG_PHY); 8368 NIU_FLAGS_HOTPLUG_PHY);
8427 } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { 8369 } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
8428 dev_err(np->device, PFX "Illegal phy string [%s].\n", 8370 dev_err(np->device, "Illegal phy string [%s]\n",
8429 np->vpd.phy_type); 8371 np->vpd.phy_type);
8430 dev_err(np->device, PFX "Falling back to SPROM.\n"); 8372 dev_err(np->device, "Falling back to SPROM\n");
8431 np->flags &= ~NIU_FLAGS_VPD_VALID; 8373 np->flags &= ~NIU_FLAGS_VPD_VALID;
8432 return; 8374 return;
8433 } 8375 }
@@ -8455,7 +8397,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
8455 8397
8456 np->eeprom_len = len; 8398 np->eeprom_len = len;
8457 8399
8458 niudbg(PROBE, "SPROM: Image size %llu\n", (unsigned long long) val); 8400 netif_printk(np, probe, KERN_DEBUG, np->dev,
8401 "SPROM: Image size %llu\n", (unsigned long long)val);
8459 8402
8460 sum = 0; 8403 sum = 0;
8461 for (i = 0; i < len; i++) { 8404 for (i = 0; i < len; i++) {
@@ -8465,10 +8408,10 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
8465 sum += (val >> 16) & 0xff; 8408 sum += (val >> 16) & 0xff;
8466 sum += (val >> 24) & 0xff; 8409 sum += (val >> 24) & 0xff;
8467 } 8410 }
8468 niudbg(PROBE, "SPROM: Checksum %x\n", (int)(sum & 0xff)); 8411 netif_printk(np, probe, KERN_DEBUG, np->dev,
8412 "SPROM: Checksum %x\n", (int)(sum & 0xff));
8469 if ((sum & 0xff) != 0xab) { 8413 if ((sum & 0xff) != 0xab) {
8470 dev_err(np->device, PFX "Bad SPROM checksum " 8414 dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
8471 "(%x, should be 0xab)\n", (int) (sum & 0xff));
8472 return -EINVAL; 8415 return -EINVAL;
8473 } 8416 }
8474 8417
@@ -8491,11 +8434,12 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
8491 ESPC_PHY_TYPE_PORT3_SHIFT; 8434 ESPC_PHY_TYPE_PORT3_SHIFT;
8492 break; 8435 break;
8493 default: 8436 default:
8494 dev_err(np->device, PFX "Bogus port number %u\n", 8437 dev_err(np->device, "Bogus port number %u\n",
8495 np->port); 8438 np->port);
8496 return -EINVAL; 8439 return -EINVAL;
8497 } 8440 }
8498 niudbg(PROBE, "SPROM: PHY type %x\n", val8); 8441 netif_printk(np, probe, KERN_DEBUG, np->dev,
8442 "SPROM: PHY type %x\n", val8);
8499 8443
8500 switch (val8) { 8444 switch (val8) {
8501 case ESPC_PHY_TYPE_1G_COPPER: 8445 case ESPC_PHY_TYPE_1G_COPPER:
@@ -8527,30 +8471,27 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
8527 break; 8471 break;
8528 8472
8529 default: 8473 default:
8530 dev_err(np->device, PFX "Bogus SPROM phy type %u\n", val8); 8474 dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
8531 return -EINVAL; 8475 return -EINVAL;
8532 } 8476 }
8533 8477
8534 val = nr64(ESPC_MAC_ADDR0); 8478 val = nr64(ESPC_MAC_ADDR0);
8535 niudbg(PROBE, "SPROM: MAC_ADDR0[%08llx]\n", 8479 netif_printk(np, probe, KERN_DEBUG, np->dev,
8536 (unsigned long long) val); 8480 "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
8537 dev->perm_addr[0] = (val >> 0) & 0xff; 8481 dev->perm_addr[0] = (val >> 0) & 0xff;
8538 dev->perm_addr[1] = (val >> 8) & 0xff; 8482 dev->perm_addr[1] = (val >> 8) & 0xff;
8539 dev->perm_addr[2] = (val >> 16) & 0xff; 8483 dev->perm_addr[2] = (val >> 16) & 0xff;
8540 dev->perm_addr[3] = (val >> 24) & 0xff; 8484 dev->perm_addr[3] = (val >> 24) & 0xff;
8541 8485
8542 val = nr64(ESPC_MAC_ADDR1); 8486 val = nr64(ESPC_MAC_ADDR1);
8543 niudbg(PROBE, "SPROM: MAC_ADDR1[%08llx]\n", 8487 netif_printk(np, probe, KERN_DEBUG, np->dev,
8544 (unsigned long long) val); 8488 "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
8545 dev->perm_addr[4] = (val >> 0) & 0xff; 8489 dev->perm_addr[4] = (val >> 0) & 0xff;
8546 dev->perm_addr[5] = (val >> 8) & 0xff; 8490 dev->perm_addr[5] = (val >> 8) & 0xff;
8547 8491
8548 if (!is_valid_ether_addr(&dev->perm_addr[0])) { 8492 if (!is_valid_ether_addr(&dev->perm_addr[0])) {
8549 dev_err(np->device, PFX "SPROM MAC address invalid\n"); 8493 dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
8550 dev_err(np->device, PFX "[ \n"); 8494 dev->perm_addr);
8551 for (i = 0; i < 6; i++)
8552 printk("%02x ", dev->perm_addr[i]);
8553 printk("]\n");
8554 return -EINVAL; 8495 return -EINVAL;
8555 } 8496 }
8556 8497
@@ -8562,8 +8503,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
8562 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); 8503 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
8563 8504
8564 val = nr64(ESPC_MOD_STR_LEN); 8505 val = nr64(ESPC_MOD_STR_LEN);
8565 niudbg(PROBE, "SPROM: MOD_STR_LEN[%llu]\n", 8506 netif_printk(np, probe, KERN_DEBUG, np->dev,
8566 (unsigned long long) val); 8507 "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
8567 if (val >= 8 * 4) 8508 if (val >= 8 * 4)
8568 return -EINVAL; 8509 return -EINVAL;
8569 8510
@@ -8578,8 +8519,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
8578 np->vpd.model[val] = '\0'; 8519 np->vpd.model[val] = '\0';
8579 8520
8580 val = nr64(ESPC_BD_MOD_STR_LEN); 8521 val = nr64(ESPC_BD_MOD_STR_LEN);
8581 niudbg(PROBE, "SPROM: BD_MOD_STR_LEN[%llu]\n", 8522 netif_printk(np, probe, KERN_DEBUG, np->dev,
8582 (unsigned long long) val); 8523 "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
8583 if (val >= 4 * 4) 8524 if (val >= 4 * 4)
8584 return -EINVAL; 8525 return -EINVAL;
8585 8526
@@ -8595,8 +8536,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
8595 8536
8596 np->vpd.mac_num = 8537 np->vpd.mac_num =
8597 nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL; 8538 nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
8598 niudbg(PROBE, "SPROM: NUM_PORTS_MACS[%d]\n", 8539 netif_printk(np, probe, KERN_DEBUG, np->dev,
8599 np->vpd.mac_num); 8540 "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);
8600 8541
8601 return 0; 8542 return 0;
8602} 8543}
@@ -8629,8 +8570,6 @@ static int __devinit niu_get_and_validate_port(struct niu *np)
8629 } 8570 }
8630 } 8571 }
8631 8572
8632 niudbg(PROBE, "niu_get_and_validate_port: port[%d] num_ports[%d]\n",
8633 np->port, parent->num_ports);
8634 if (np->port >= parent->num_ports) 8573 if (np->port >= parent->num_ports)
8635 return -ENODEV; 8574 return -ENODEV;
8636 8575
@@ -8659,14 +8598,12 @@ static int __devinit phy_record(struct niu_parent *parent,
8659 8598
8660 pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n", 8599 pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
8661 parent->index, id, 8600 parent->index, id,
8662 (type == PHY_TYPE_PMA_PMD ? 8601 type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
8663 "PMA/PMD" : 8602 type == PHY_TYPE_PCS ? "PCS" : "MII",
8664 (type == PHY_TYPE_PCS ?
8665 "PCS" : "MII")),
8666 phy_port); 8603 phy_port);
8667 8604
8668 if (p->cur[type] >= NIU_MAX_PORTS) { 8605 if (p->cur[type] >= NIU_MAX_PORTS) {
8669 printk(KERN_ERR PFX "Too many PHY ports.\n"); 8606 pr_err("Too many PHY ports\n");
8670 return -EINVAL; 8607 return -EINVAL;
8671 } 8608 }
8672 idx = p->cur[type]; 8609 idx = p->cur[type];
@@ -8727,8 +8664,7 @@ static void __devinit niu_n2_divide_channels(struct niu_parent *parent)
8727 parent->rxchan_per_port[i] = (16 / num_ports); 8664 parent->rxchan_per_port[i] = (16 / num_ports);
8728 parent->txchan_per_port[i] = (16 / num_ports); 8665 parent->txchan_per_port[i] = (16 / num_ports);
8729 8666
8730 pr_info(PFX "niu%d: Port %u [%u RX chans] " 8667 pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
8731 "[%u TX chans]\n",
8732 parent->index, i, 8668 parent->index, i,
8733 parent->rxchan_per_port[i], 8669 parent->rxchan_per_port[i],
8734 parent->txchan_per_port[i]); 8670 parent->txchan_per_port[i]);
@@ -8771,8 +8707,7 @@ static void __devinit niu_divide_channels(struct niu_parent *parent,
8771 parent->rxchan_per_port[i] = rx_chans_per_1g; 8707 parent->rxchan_per_port[i] = rx_chans_per_1g;
8772 parent->txchan_per_port[i] = tx_chans_per_1g; 8708 parent->txchan_per_port[i] = tx_chans_per_1g;
8773 } 8709 }
8774 pr_info(PFX "niu%d: Port %u [%u RX chans] " 8710 pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
8775 "[%u TX chans]\n",
8776 parent->index, i, 8711 parent->index, i,
8777 parent->rxchan_per_port[i], 8712 parent->rxchan_per_port[i],
8778 parent->txchan_per_port[i]); 8713 parent->txchan_per_port[i]);
@@ -8781,23 +8716,20 @@ static void __devinit niu_divide_channels(struct niu_parent *parent,
8781 } 8716 }
8782 8717
8783 if (tot_rx > NIU_NUM_RXCHAN) { 8718 if (tot_rx > NIU_NUM_RXCHAN) {
8784 printk(KERN_ERR PFX "niu%d: Too many RX channels (%d), " 8719 pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
8785 "resetting to one per port.\n",
8786 parent->index, tot_rx); 8720 parent->index, tot_rx);
8787 for (i = 0; i < num_ports; i++) 8721 for (i = 0; i < num_ports; i++)
8788 parent->rxchan_per_port[i] = 1; 8722 parent->rxchan_per_port[i] = 1;
8789 } 8723 }
8790 if (tot_tx > NIU_NUM_TXCHAN) { 8724 if (tot_tx > NIU_NUM_TXCHAN) {
8791 printk(KERN_ERR PFX "niu%d: Too many TX channels (%d), " 8725 pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
8792 "resetting to one per port.\n",
8793 parent->index, tot_tx); 8726 parent->index, tot_tx);
8794 for (i = 0; i < num_ports; i++) 8727 for (i = 0; i < num_ports; i++)
8795 parent->txchan_per_port[i] = 1; 8728 parent->txchan_per_port[i] = 1;
8796 } 8729 }
8797 if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) { 8730 if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
8798 printk(KERN_WARNING PFX "niu%d: Driver bug, wasted channels, " 8731 pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
8799 "RX[%d] TX[%d]\n", 8732 parent->index, tot_rx, tot_tx);
8800 parent->index, tot_rx, tot_tx);
8801 } 8733 }
8802} 8734}
8803 8735
@@ -8825,18 +8757,18 @@ static void __devinit niu_divide_rdc_groups(struct niu_parent *parent,
8825 struct rdc_table *rt = &tp->tables[grp]; 8757 struct rdc_table *rt = &tp->tables[grp];
8826 int slot; 8758 int slot;
8827 8759
8828 pr_info(PFX "niu%d: Port %d RDC tbl(%d) [ ", 8760 pr_info("niu%d: Port %d RDC tbl(%d) [ ",
8829 parent->index, i, tp->first_table_num + grp); 8761 parent->index, i, tp->first_table_num + grp);
8830 for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) { 8762 for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
8831 rt->rxdma_channel[slot] = 8763 rt->rxdma_channel[slot] =
8832 rdc_channel_base + this_channel_offset; 8764 rdc_channel_base + this_channel_offset;
8833 8765
8834 printk("%d ", rt->rxdma_channel[slot]); 8766 pr_cont("%d ", rt->rxdma_channel[slot]);
8835 8767
8836 if (++this_channel_offset == num_channels) 8768 if (++this_channel_offset == num_channels)
8837 this_channel_offset = 0; 8769 this_channel_offset = 0;
8838 } 8770 }
8839 printk("]\n"); 8771 pr_cont("]\n");
8840 } 8772 }
8841 8773
8842 parent->rdc_default[i] = rdc_channel_base; 8774 parent->rdc_default[i] = rdc_channel_base;
@@ -8996,8 +8928,7 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
8996 break; 8928 break;
8997 8929
8998 default: 8930 default:
8999 printk(KERN_ERR PFX "Unsupported port config " 8931 pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
9000 "10G[%d] 1G[%d]\n",
9001 num_10g, num_1g); 8932 num_10g, num_1g);
9002 return -EINVAL; 8933 return -EINVAL;
9003 } 8934 }
@@ -9015,8 +8946,7 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
9015 return 0; 8946 return 0;
9016 8947
9017unknown_vg_1g_port: 8948unknown_vg_1g_port:
9018 printk(KERN_ERR PFX "Cannot identify platform type, 1gport=%d\n", 8949 pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
9019 lowest_1g);
9020 return -EINVAL; 8950 return -EINVAL;
9021} 8951}
9022 8952
@@ -9025,9 +8955,6 @@ static int __devinit niu_probe_ports(struct niu *np)
9025 struct niu_parent *parent = np->parent; 8955 struct niu_parent *parent = np->parent;
9026 int err, i; 8956 int err, i;
9027 8957
9028 niudbg(PROBE, "niu_probe_ports(): port_phy[%08x]\n",
9029 parent->port_phy);
9030
9031 if (parent->port_phy == PORT_PHY_UNKNOWN) { 8958 if (parent->port_phy == PORT_PHY_UNKNOWN) {
9032 err = walk_phys(np, parent); 8959 err = walk_phys(np, parent);
9033 if (err) 8960 if (err)
@@ -9048,9 +8975,6 @@ static int __devinit niu_classifier_swstate_init(struct niu *np)
9048{ 8975{
9049 struct niu_classifier *cp = &np->clas; 8976 struct niu_classifier *cp = &np->clas;
9050 8977
9051 niudbg(PROBE, "niu_classifier_swstate_init: num_tcam(%d)\n",
9052 np->parent->tcam_num_entries);
9053
9054 cp->tcam_top = (u16) np->port; 8978 cp->tcam_top = (u16) np->port;
9055 cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports; 8979 cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
9056 cp->h1_init = 0xffffffff; 8980 cp->h1_init = 0xffffffff;
@@ -9116,8 +9040,7 @@ static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np)
9116 break; 9040 break;
9117 9041
9118 default: 9042 default:
9119 dev_err(np->device, PFX "Port %u is invalid, cannot " 9043 dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
9120 "compute MAC block offset.\n", np->port);
9121 return -EINVAL; 9044 return -EINVAL;
9122 } 9045 }
9123 9046
@@ -9327,9 +9250,8 @@ static int __devinit niu_get_of_props(struct niu *np)
9327 9250
9328 phy_type = of_get_property(dp, "phy-type", &prop_len); 9251 phy_type = of_get_property(dp, "phy-type", &prop_len);
9329 if (!phy_type) { 9252 if (!phy_type) {
9330 dev_err(np->device, PFX "%s: OF node lacks " 9253 netdev_err(dev, "%s: OF node lacks phy-type property\n",
9331 "phy-type property\n", 9254 dp->full_name);
9332 dp->full_name);
9333 return -EINVAL; 9255 return -EINVAL;
9334 } 9256 }
9335 9257
@@ -9339,34 +9261,26 @@ static int __devinit niu_get_of_props(struct niu *np)
9339 strcpy(np->vpd.phy_type, phy_type); 9261 strcpy(np->vpd.phy_type, phy_type);
9340 9262
9341 if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { 9263 if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
9342 dev_err(np->device, PFX "%s: Illegal phy string [%s].\n", 9264 netdev_err(dev, "%s: Illegal phy string [%s]\n",
9343 dp->full_name, np->vpd.phy_type); 9265 dp->full_name, np->vpd.phy_type);
9344 return -EINVAL; 9266 return -EINVAL;
9345 } 9267 }
9346 9268
9347 mac_addr = of_get_property(dp, "local-mac-address", &prop_len); 9269 mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
9348 if (!mac_addr) { 9270 if (!mac_addr) {
9349 dev_err(np->device, PFX "%s: OF node lacks " 9271 netdev_err(dev, "%s: OF node lacks local-mac-address property\n",
9350 "local-mac-address property\n", 9272 dp->full_name);
9351 dp->full_name);
9352 return -EINVAL; 9273 return -EINVAL;
9353 } 9274 }
9354 if (prop_len != dev->addr_len) { 9275 if (prop_len != dev->addr_len) {
9355 dev_err(np->device, PFX "%s: OF MAC address prop len (%d) " 9276 netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n",
9356 "is wrong.\n", 9277 dp->full_name, prop_len);
9357 dp->full_name, prop_len);
9358 } 9278 }
9359 memcpy(dev->perm_addr, mac_addr, dev->addr_len); 9279 memcpy(dev->perm_addr, mac_addr, dev->addr_len);
9360 if (!is_valid_ether_addr(&dev->perm_addr[0])) { 9280 if (!is_valid_ether_addr(&dev->perm_addr[0])) {
9361 int i; 9281 netdev_err(dev, "%s: OF MAC address is invalid\n",
9362 9282 dp->full_name);
9363 dev_err(np->device, PFX "%s: OF MAC address is invalid\n", 9283 netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->perm_addr);
9364 dp->full_name);
9365 dev_err(np->device, PFX "%s: [ \n",
9366 dp->full_name);
9367 for (i = 0; i < 6; i++)
9368 printk("%02x ", dev->perm_addr[i]);
9369 printk("]\n");
9370 return -EINVAL; 9284 return -EINVAL;
9371 } 9285 }
9372 9286
@@ -9414,8 +9328,8 @@ static int __devinit niu_get_invariants(struct niu *np)
9414 9328
9415 nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE); 9329 nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
9416 offset = niu_pci_vpd_offset(np); 9330 offset = niu_pci_vpd_offset(np);
9417 niudbg(PROBE, "niu_get_invariants: VPD offset [%08x]\n", 9331 netif_printk(np, probe, KERN_DEBUG, np->dev,
9418 offset); 9332 "%s() VPD offset [%08x]\n", __func__, offset);
9419 if (offset) 9333 if (offset)
9420 niu_pci_vpd_fetch(np, offset); 9334 niu_pci_vpd_fetch(np, offset);
9421 nw64(ESPC_PIO_EN, 0); 9335 nw64(ESPC_PIO_EN, 0);
@@ -9575,8 +9489,6 @@ static struct niu_parent * __devinit niu_new_parent(struct niu *np,
9575 struct niu_parent *p; 9489 struct niu_parent *p;
9576 int i; 9490 int i;
9577 9491
9578 niudbg(PROBE, "niu_new_parent: Creating new parent.\n");
9579
9580 plat_dev = platform_device_register_simple("niu", niu_parent_index, 9492 plat_dev = platform_device_register_simple("niu", niu_parent_index,
9581 NULL, 0); 9493 NULL, 0);
9582 if (IS_ERR(plat_dev)) 9494 if (IS_ERR(plat_dev))
@@ -9641,9 +9553,6 @@ static struct niu_parent * __devinit niu_get_parent(struct niu *np,
9641 struct niu_parent *p, *tmp; 9553 struct niu_parent *p, *tmp;
9642 int port = np->port; 9554 int port = np->port;
9643 9555
9644 niudbg(PROBE, "niu_get_parent: platform_type[%u] port[%u]\n",
9645 ptype, port);
9646
9647 mutex_lock(&niu_parent_lock); 9556 mutex_lock(&niu_parent_lock);
9648 p = NULL; 9557 p = NULL;
9649 list_for_each_entry(tmp, &niu_parent_list, list) { 9558 list_for_each_entry(tmp, &niu_parent_list, list) {
@@ -9681,7 +9590,8 @@ static void niu_put_parent(struct niu *np)
9681 9590
9682 BUG_ON(!p || p->ports[port] != np); 9591 BUG_ON(!p || p->ports[port] != np);
9683 9592
9684 niudbg(PROBE, "niu_put_parent: port[%u]\n", port); 9593 netif_printk(np, probe, KERN_DEBUG, np->dev,
9594 "%s() port[%u]\n", __func__, port);
9685 9595
9686 sprintf(port_name, "port%d", port); 9596 sprintf(port_name, "port%d", port);
9687 9597
@@ -9772,7 +9682,7 @@ static struct net_device * __devinit niu_alloc_and_init(
9772 9682
9773 dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN); 9683 dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
9774 if (!dev) { 9684 if (!dev) {
9775 dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n"); 9685 dev_err(gen_dev, "Etherdev alloc failed, aborting\n");
9776 return NULL; 9686 return NULL;
9777 } 9687 }
9778 9688
@@ -9858,30 +9768,26 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
9858 9768
9859 err = pci_enable_device(pdev); 9769 err = pci_enable_device(pdev);
9860 if (err) { 9770 if (err) {
9861 dev_err(&pdev->dev, PFX "Cannot enable PCI device, " 9771 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
9862 "aborting.\n");
9863 return err; 9772 return err;
9864 } 9773 }
9865 9774
9866 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || 9775 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
9867 !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 9776 !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9868 dev_err(&pdev->dev, PFX "Cannot find proper PCI device " 9777 dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
9869 "base addresses, aborting.\n");
9870 err = -ENODEV; 9778 err = -ENODEV;
9871 goto err_out_disable_pdev; 9779 goto err_out_disable_pdev;
9872 } 9780 }
9873 9781
9874 err = pci_request_regions(pdev, DRV_MODULE_NAME); 9782 err = pci_request_regions(pdev, DRV_MODULE_NAME);
9875 if (err) { 9783 if (err) {
9876 dev_err(&pdev->dev, PFX "Cannot obtain PCI resources, " 9784 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
9877 "aborting.\n");
9878 goto err_out_disable_pdev; 9785 goto err_out_disable_pdev;
9879 } 9786 }
9880 9787
9881 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 9788 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9882 if (pos <= 0) { 9789 if (pos <= 0) {
9883 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, " 9790 dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
9884 "aborting.\n");
9885 goto err_out_free_res; 9791 goto err_out_free_res;
9886 } 9792 }
9887 9793
@@ -9920,17 +9826,14 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
9920 dev->features |= NETIF_F_HIGHDMA; 9826 dev->features |= NETIF_F_HIGHDMA;
9921 err = pci_set_consistent_dma_mask(pdev, dma_mask); 9827 err = pci_set_consistent_dma_mask(pdev, dma_mask);
9922 if (err) { 9828 if (err) {
9923 dev_err(&pdev->dev, PFX "Unable to obtain 44 bit " 9829 dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
9924 "DMA for consistent allocations, "
9925 "aborting.\n");
9926 goto err_out_release_parent; 9830 goto err_out_release_parent;
9927 } 9831 }
9928 } 9832 }
9929 if (err || dma_mask == DMA_BIT_MASK(32)) { 9833 if (err || dma_mask == DMA_BIT_MASK(32)) {
9930 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 9834 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9931 if (err) { 9835 if (err) {
9932 dev_err(&pdev->dev, PFX "No usable DMA configuration, " 9836 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
9933 "aborting.\n");
9934 goto err_out_release_parent; 9837 goto err_out_release_parent;
9935 } 9838 }
9936 } 9839 }
@@ -9939,8 +9842,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
9939 9842
9940 np->regs = pci_ioremap_bar(pdev, 0); 9843 np->regs = pci_ioremap_bar(pdev, 0);
9941 if (!np->regs) { 9844 if (!np->regs) {
9942 dev_err(&pdev->dev, PFX "Cannot map device registers, " 9845 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
9943 "aborting.\n");
9944 err = -ENOMEM; 9846 err = -ENOMEM;
9945 goto err_out_release_parent; 9847 goto err_out_release_parent;
9946 } 9848 }
@@ -9955,15 +9857,13 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
9955 err = niu_get_invariants(np); 9857 err = niu_get_invariants(np);
9956 if (err) { 9858 if (err) {
9957 if (err != -ENODEV) 9859 if (err != -ENODEV)
9958 dev_err(&pdev->dev, PFX "Problem fetching invariants " 9860 dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
9959 "of chip, aborting.\n");
9960 goto err_out_iounmap; 9861 goto err_out_iounmap;
9961 } 9862 }
9962 9863
9963 err = register_netdev(dev); 9864 err = register_netdev(dev);
9964 if (err) { 9865 if (err) {
9965 dev_err(&pdev->dev, PFX "Cannot register net device, " 9866 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
9966 "aborting.\n");
9967 goto err_out_iounmap; 9867 goto err_out_iounmap;
9968 } 9868 }
9969 9869
@@ -10157,7 +10057,7 @@ static int __devinit niu_of_probe(struct of_device *op,
10157 10057
10158 reg = of_get_property(op->node, "reg", NULL); 10058 reg = of_get_property(op->node, "reg", NULL);
10159 if (!reg) { 10059 if (!reg) {
10160 dev_err(&op->dev, PFX "%s: No 'reg' property, aborting.\n", 10060 dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
10161 op->node->full_name); 10061 op->node->full_name);
10162 return -ENODEV; 10062 return -ENODEV;
10163 } 10063 }
@@ -10186,8 +10086,7 @@ static int __devinit niu_of_probe(struct of_device *op,
10186 resource_size(&op->resource[1]), 10086 resource_size(&op->resource[1]),
10187 "niu regs"); 10087 "niu regs");
10188 if (!np->regs) { 10088 if (!np->regs) {
10189 dev_err(&op->dev, PFX "Cannot map device registers, " 10089 dev_err(&op->dev, "Cannot map device registers, aborting\n");
10190 "aborting.\n");
10191 err = -ENOMEM; 10090 err = -ENOMEM;
10192 goto err_out_release_parent; 10091 goto err_out_release_parent;
10193 } 10092 }
@@ -10196,8 +10095,7 @@ static int __devinit niu_of_probe(struct of_device *op,
10196 resource_size(&op->resource[2]), 10095 resource_size(&op->resource[2]),
10197 "niu vregs-1"); 10096 "niu vregs-1");
10198 if (!np->vir_regs_1) { 10097 if (!np->vir_regs_1) {
10199 dev_err(&op->dev, PFX "Cannot map device vir registers 1, " 10098 dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
10200 "aborting.\n");
10201 err = -ENOMEM; 10099 err = -ENOMEM;
10202 goto err_out_iounmap; 10100 goto err_out_iounmap;
10203 } 10101 }
@@ -10206,8 +10104,7 @@ static int __devinit niu_of_probe(struct of_device *op,
10206 resource_size(&op->resource[3]), 10104 resource_size(&op->resource[3]),
10207 "niu vregs-2"); 10105 "niu vregs-2");
10208 if (!np->vir_regs_2) { 10106 if (!np->vir_regs_2) {
10209 dev_err(&op->dev, PFX "Cannot map device vir registers 2, " 10107 dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
10210 "aborting.\n");
10211 err = -ENOMEM; 10108 err = -ENOMEM;
10212 goto err_out_iounmap; 10109 goto err_out_iounmap;
10213 } 10110 }
@@ -10217,15 +10114,13 @@ static int __devinit niu_of_probe(struct of_device *op,
10217 err = niu_get_invariants(np); 10114 err = niu_get_invariants(np);
10218 if (err) { 10115 if (err) {
10219 if (err != -ENODEV) 10116 if (err != -ENODEV)
10220 dev_err(&op->dev, PFX "Problem fetching invariants " 10117 dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
10221 "of chip, aborting.\n");
10222 goto err_out_iounmap; 10118 goto err_out_iounmap;
10223 } 10119 }
10224 10120
10225 err = register_netdev(dev); 10121 err = register_netdev(dev);
10226 if (err) { 10122 if (err) {
10227 dev_err(&op->dev, PFX "Cannot register net device, " 10123 dev_err(&op->dev, "Cannot register net device, aborting\n");
10228 "aborting.\n");
10229 goto err_out_iounmap; 10124 goto err_out_iounmap;
10230 } 10125 }
10231 10126
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 1f6327d41536..8dd509c09bc8 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1719,7 +1719,7 @@ static void ns83820_set_multicast(struct net_device *ndev)
1719 else 1719 else
1720 and_mask &= ~(RFCR_AAU | RFCR_AAM); 1720 and_mask &= ~(RFCR_AAU | RFCR_AAM);
1721 1721
1722 if (ndev->flags & IFF_ALLMULTI || ndev->mc_count) 1722 if (ndev->flags & IFF_ALLMULTI || netdev_mc_count(ndev))
1723 or_mask |= RFCR_AAM; 1723 or_mask |= RFCR_AAM;
1724 else 1724 else
1725 and_mask &= ~RFCR_AAM; 1725 and_mask &= ~RFCR_AAM;
@@ -2292,7 +2292,7 @@ static void __devexit ns83820_remove_one(struct pci_dev *pci_dev)
2292 pci_set_drvdata(pci_dev, NULL); 2292 pci_set_drvdata(pci_dev, NULL);
2293} 2293}
2294 2294
2295static struct pci_device_id ns83820_pci_tbl[] = { 2295static DEFINE_PCI_DEVICE_TABLE(ns83820_pci_tbl) = {
2296 { 0x100b, 0x0022, PCI_ANY_ID, PCI_ANY_ID, 0, .driver_data = 0, }, 2296 { 0x100b, 0x0022, PCI_ANY_ID, PCI_ANY_ID, 0, .driver_data = 0, },
2297 { 0, }, 2297 { 0, },
2298}; 2298};
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index 050538bf155a..be368e5cbf75 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -467,7 +467,6 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
467{ 467{
468 struct octeon_mgmt *p = netdev_priv(netdev); 468 struct octeon_mgmt *p = netdev_priv(netdev);
469 int port = p->port; 469 int port = p->port;
470 int i;
471 union cvmx_agl_gmx_rxx_adr_ctl adr_ctl; 470 union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
472 union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx; 471 union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
473 unsigned long flags; 472 unsigned long flags;
@@ -493,8 +492,8 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
493 } 492 }
494 493
495 if (netdev->flags & IFF_MULTICAST) { 494 if (netdev->flags & IFF_MULTICAST) {
496 if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) 495 if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
497 || netdev->mc_count > available_cam_entries) 496 netdev_mc_count(netdev) > available_cam_entries)
498 multicast_mode = 2; /* 1 - Accept all multicast. */ 497 multicast_mode = 2; /* 1 - Accept all multicast. */
499 else 498 else
500 multicast_mode = 0; /* 0 - Use CAM. */ 499 multicast_mode = 0; /* 0 - Use CAM. */
@@ -511,12 +510,8 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
511 } 510 }
512 } 511 }
513 if (multicast_mode == 0) { 512 if (multicast_mode == 0) {
514 i = netdev->mc_count; 513 netdev_for_each_mc_addr(list, netdev)
515 list = netdev->mc_list;
516 while (i--) {
517 octeon_mgmt_cam_state_add(&cam_state, list->da_addr); 514 octeon_mgmt_cam_state_add(&cam_state, list->da_addr);
518 list = list->next;
519 }
520 } 515 }
521 516
522 517
@@ -1119,11 +1114,8 @@ static int __init octeon_mgmt_probe(struct platform_device *pdev)
1119 1114
1120 if (p->port >= octeon_bootinfo->mac_addr_count) 1115 if (p->port >= octeon_bootinfo->mac_addr_count)
1121 dev_err(&pdev->dev, 1116 dev_err(&pdev->dev,
1122 "Error %s: Using MAC outside of the assigned range: " 1117 "Error %s: Using MAC outside of the assigned range: %pM\n",
1123 "%02x:%02x:%02x:%02x:%02x:%02x\n", netdev->name, 1118 netdev->name, netdev->dev_addr);
1124 netdev->dev_addr[0], netdev->dev_addr[1],
1125 netdev->dev_addr[2], netdev->dev_addr[3],
1126 netdev->dev_addr[4], netdev->dev_addr[5]);
1127 1119
1128 if (register_netdev(netdev)) 1120 if (register_netdev(netdev))
1129 goto err; 1121 goto err;
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 1673eb045e1e..d44d4a208bbf 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -1875,7 +1875,7 @@ static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
1875 free_netdev(netdev); 1875 free_netdev(netdev);
1876} 1876}
1877 1877
1878static struct pci_device_id pasemi_mac_pci_tbl[] = { 1878static DEFINE_PCI_DEVICE_TABLE(pasemi_mac_pci_tbl) = {
1879 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) }, 1879 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
1880 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) }, 1880 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
1881 { }, 1881 { },
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 480af402affd..36785853a149 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -11,7 +11,7 @@
11 11
12 -----<snip>----- 12 -----<snip>-----
13 13
14 Written 1997-2000 by Donald Becker. 14 Written 1997-2000 by Donald Becker.
15 This software may be used and distributed according to the 15 This software may be used and distributed according to the
16 terms of the GNU General Public License (GPL), incorporated 16 terms of the GNU General Public License (GPL), incorporated
17 herein by reference. Drivers based on or derived from this 17 herein by reference. Drivers based on or derived from this
@@ -85,6 +85,8 @@ IVc. Errata
85 85
86*/ 86*/
87 87
88#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
89
88#include <linux/module.h> 90#include <linux/module.h>
89#include <linux/kernel.h> 91#include <linux/kernel.h>
90#include <linux/pci.h> 92#include <linux/pci.h>
@@ -96,16 +98,15 @@ IVc. Errata
96#include <linux/ethtool.h> 98#include <linux/ethtool.h>
97#include <linux/mii.h> 99#include <linux/mii.h>
98#include <linux/crc32.h> 100#include <linux/crc32.h>
99#include <asm/io.h> 101#include <linux/io.h>
100 102
101#define NETDRV_VERSION "1.0.1" 103#define NETDRV_VERSION "1.0.1"
102#define MODNAME "netdrv" 104#define MODNAME "netdrv"
103#define NETDRV_DRIVER_LOAD_MSG "MyVendor Fast Ethernet driver " NETDRV_VERSION " loaded" 105#define NETDRV_DRIVER_LOAD_MSG "MyVendor Fast Ethernet driver " NETDRV_VERSION " loaded"
104#define PFX MODNAME ": "
105 106
106static char version[] __devinitdata = 107static char version[] __devinitdata =
107KERN_INFO NETDRV_DRIVER_LOAD_MSG "\n" 108 KERN_INFO NETDRV_DRIVER_LOAD_MSG "\n"
108" Support available from http://foo.com/bar/baz.html\n"; 109 " Support available from http://foo.com/bar/baz.html\n";
109 110
110/* define to 1 to enable PIO instead of MMIO */ 111/* define to 1 to enable PIO instead of MMIO */
111#undef USE_IO_OPS 112#undef USE_IO_OPS
@@ -119,19 +120,24 @@ KERN_INFO NETDRV_DRIVER_LOAD_MSG "\n"
119 120
120#ifdef NETDRV_DEBUG 121#ifdef NETDRV_DEBUG
121/* note: prints function name for you */ 122/* note: prints function name for you */
122# define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args) 123#define DPRINTK(fmt, args...) \
124 printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
123#else 125#else
124# define DPRINTK(fmt, args...) 126#define DPRINTK(fmt, args...) \
127do { \
128 if (0) \
129 printk(KERN_DEBUG fmt, ##args); \
130} while (0)
125#endif 131#endif
126 132
127#ifdef NETDRV_NDEBUG 133#ifdef NETDRV_NDEBUG
128# define assert(expr) do {} while (0) 134#define assert(expr) do {} while (0)
129#else 135#else
130# define assert(expr) \ 136#define assert(expr) \
131 if(!(expr)) { \ 137 if (!(expr)) { \
132 printk( "Assertion failed! %s,%s,%s,line=%d\n", \ 138 printk("Assertion failed! %s,%s,%s,line=%d\n", \
133 #expr,__FILE__,__func__,__LINE__); \ 139 #expr, __FILE__, __func__, __LINE__); \
134 } 140 }
135#endif 141#endif
136 142
137 143
@@ -148,10 +154,10 @@ static int multicast_filter_limit = 32;
148 154
149/* Size of the in-memory receive ring. */ 155/* Size of the in-memory receive ring. */
150#define RX_BUF_LEN_IDX 2 /* 0==8K, 1==16K, 2==32K, 3==64K */ 156#define RX_BUF_LEN_IDX 2 /* 0==8K, 1==16K, 2==32K, 3==64K */
151#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX) 157#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX)
152#define RX_BUF_PAD 16 158#define RX_BUF_PAD 16
153#define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */ 159#define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */
154#define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD) 160#define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD)
155 161
156/* Number of Tx descriptor registers. */ 162/* Number of Tx descriptor registers. */
157#define NUM_TX_DESC 4 163#define NUM_TX_DESC 4
@@ -165,9 +171,11 @@ static int multicast_filter_limit = 32;
165 171
166/* PCI Tuning Parameters 172/* PCI Tuning Parameters
167 Threshold is bytes transferred to chip before transmission starts. */ 173 Threshold is bytes transferred to chip before transmission starts. */
168#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */ 174#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */
169 175
170/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */ 176/* The following settings are log_2(bytes)-4:
177 0==16 bytes 1==32 2==64 3==128 4==256 5==512 6==1024 7==end of packet.
178*/
171#define RX_FIFO_THRESH 6 /* Rx buffer level before first PCI xfer. */ 179#define RX_FIFO_THRESH 6 /* Rx buffer level before first PCI xfer. */
172#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ 180#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
173#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ 181#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
@@ -175,8 +183,7 @@ static int multicast_filter_limit = 32;
175 183
176/* Operational parameters that usually are not changed. */ 184/* Operational parameters that usually are not changed. */
177/* Time in jiffies before concluding the transmitter is hung. */ 185/* Time in jiffies before concluding the transmitter is hung. */
178#define TX_TIMEOUT (6*HZ) 186#define TX_TIMEOUT (6 * HZ)
179
180 187
181enum { 188enum {
182 HAS_CHIP_XCVR = 0x020000, 189 HAS_CHIP_XCVR = 0x020000,
@@ -186,7 +193,7 @@ enum {
186#define NETDRV_MIN_IO_SIZE 0x80 193#define NETDRV_MIN_IO_SIZE 0x80
187#define RTL8139B_IO_SIZE 256 194#define RTL8139B_IO_SIZE 256
188 195
189#define NETDRV_CAPS HAS_CHIP_XCVR|HAS_LNK_CHNG 196#define NETDRV_CAPS (HAS_CHIP_XCVR | HAS_LNK_CHNG)
190 197
191typedef enum { 198typedef enum {
192 RTL8139 = 0, 199 RTL8139 = 0,
@@ -211,7 +218,7 @@ static struct {
211}; 218};
212 219
213 220
214static struct pci_device_id netdrv_pci_tbl[] = { 221static DEFINE_PCI_DEVICE_TABLE(netdrv_pci_tbl) = {
215 {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, 222 {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
216 {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NETDRV_CB }, 223 {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NETDRV_CB },
217 {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMC1211TX }, 224 {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMC1211TX },
@@ -220,7 +227,7 @@ static struct pci_device_id netdrv_pci_tbl[] = {
220 {0x4033, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ADDTRON8139 }, 227 {0x4033, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ADDTRON8139 },
221 {0,} 228 {0,}
222}; 229};
223MODULE_DEVICE_TABLE (pci, netdrv_pci_tbl); 230MODULE_DEVICE_TABLE(pci, netdrv_pci_tbl);
224 231
225 232
226/* The rest of these values should never change. */ 233/* The rest of these values should never change. */
@@ -270,7 +277,7 @@ enum NETDRV_registers {
270enum ClearBitMasks { 277enum ClearBitMasks {
271 MultiIntrClear = 0xF000, 278 MultiIntrClear = 0xF000,
272 ChipCmdClear = 0xE2, 279 ChipCmdClear = 0xE2,
273 Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1), 280 Config1Clear = (1 << 7) | (1 << 6) | (1 << 3) | (1 << 2) | (1 << 1),
274}; 281};
275 282
276enum ChipCmdBits { 283enum ChipCmdBits {
@@ -329,7 +336,7 @@ enum tx_config_bits {
329 TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */ 336 TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */
330 TxCRC = (1 << 16), /* DISABLE appending CRC to end of Tx packets */ 337 TxCRC = (1 << 16), /* DISABLE appending CRC to end of Tx packets */
331 TxClearAbt = (1 << 0), /* Clear abort (WO) */ 338 TxClearAbt = (1 << 0), /* Clear abort (WO) */
332 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ 339 TxDMAShift = 8, /* DMA burst value(0-7) is shift this many bits */
333 340
334 TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */ 341 TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */
335}; 342};
@@ -481,41 +488,44 @@ struct netdrv_private {
481 chip_t chipset; 488 chip_t chipset;
482}; 489};
483 490
484MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>"); 491MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
485MODULE_DESCRIPTION ("Skeleton for a PCI Fast Ethernet driver"); 492MODULE_DESCRIPTION("Skeleton for a PCI Fast Ethernet driver");
486MODULE_LICENSE("GPL"); 493MODULE_LICENSE("GPL");
487module_param(multicast_filter_limit, int, 0); 494module_param(multicast_filter_limit, int, 0);
488module_param(max_interrupt_work, int, 0); 495module_param(max_interrupt_work, int, 0);
489module_param_array(media, int, NULL, 0); 496module_param_array(media, int, NULL, 0);
490MODULE_PARM_DESC (multicast_filter_limit, "pci-skeleton maximum number of filtered multicast addresses"); 497MODULE_PARM_DESC(multicast_filter_limit,
491MODULE_PARM_DESC (max_interrupt_work, "pci-skeleton maximum events handled per interrupt"); 498 MODNAME " maximum number of filtered multicast addresses");
492MODULE_PARM_DESC (media, "pci-skeleton: Bits 0-3: media type, bit 17: full duplex"); 499MODULE_PARM_DESC(max_interrupt_work,
493 500 MODNAME " maximum events handled per interrupt");
494static int read_eeprom (void *ioaddr, int location, int addr_len); 501MODULE_PARM_DESC(media,
495static int netdrv_open (struct net_device *dev); 502 MODNAME " Bits 0-3: media type, bit 17: full duplex");
496static int mdio_read (struct net_device *dev, int phy_id, int location); 503
497static void mdio_write (struct net_device *dev, int phy_id, int location, 504static int read_eeprom(void *ioaddr, int location, int addr_len);
498 int val); 505static int netdrv_open(struct net_device *dev);
499static void netdrv_timer (unsigned long data); 506static int mdio_read(struct net_device *dev, int phy_id, int location);
500static void netdrv_tx_timeout (struct net_device *dev); 507static void mdio_write(struct net_device *dev, int phy_id, int location,
501static void netdrv_init_ring (struct net_device *dev); 508 int val);
502static int netdrv_start_xmit (struct sk_buff *skb, 509static void netdrv_timer(unsigned long data);
503 struct net_device *dev); 510static void netdrv_tx_timeout(struct net_device *dev);
504static irqreturn_t netdrv_interrupt (int irq, void *dev_instance); 511static void netdrv_init_ring(struct net_device *dev);
505static int netdrv_close (struct net_device *dev); 512static int netdrv_start_xmit(struct sk_buff *skb,
506static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); 513 struct net_device *dev);
507static void netdrv_set_rx_mode (struct net_device *dev); 514static irqreturn_t netdrv_interrupt(int irq, void *dev_instance);
508static void netdrv_hw_start (struct net_device *dev); 515static int netdrv_close(struct net_device *dev);
516static int netdrv_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
517static void netdrv_set_rx_mode(struct net_device *dev);
518static void netdrv_hw_start(struct net_device *dev);
509 519
510 520
511#ifdef USE_IO_OPS 521#ifdef USE_IO_OPS
512 522
513#define NETDRV_R8(reg) inb (((unsigned long)ioaddr) + (reg)) 523#define NETDRV_R8(reg) inb(((unsigned long)ioaddr) + (reg))
514#define NETDRV_R16(reg) inw (((unsigned long)ioaddr) + (reg)) 524#define NETDRV_R16(reg) inw(((unsigned long)ioaddr) + (reg))
515#define NETDRV_R32(reg) ((unsigned long) inl (((unsigned long)ioaddr) + (reg))) 525#define NETDRV_R32(reg) ((unsigned long)inl(((unsigned long)ioaddr) + (reg)))
516#define NETDRV_W8(reg, val8) outb ((val8), ((unsigned long)ioaddr) + (reg)) 526#define NETDRV_W8(reg, val8) outb((val8), ((unsigned long)ioaddr) + (reg))
517#define NETDRV_W16(reg, val16) outw ((val16), ((unsigned long)ioaddr) + (reg)) 527#define NETDRV_W16(reg, val16) outw((val16), ((unsigned long)ioaddr) + (reg))
518#define NETDRV_W32(reg, val32) outl ((val32), ((unsigned long)ioaddr) + (reg)) 528#define NETDRV_W32(reg, val32) outl((val32), ((unsigned long)ioaddr) + (reg))
519#define NETDRV_W8_F NETDRV_W8 529#define NETDRV_W8_F NETDRV_W8
520#define NETDRV_W16_F NETDRV_W16 530#define NETDRV_W16_F NETDRV_W16
521#define NETDRV_W32_F NETDRV_W32 531#define NETDRV_W32_F NETDRV_W32
@@ -528,25 +538,37 @@ static void netdrv_hw_start (struct net_device *dev);
528#define readb(addr) inb((unsigned long)(addr)) 538#define readb(addr) inb((unsigned long)(addr))
529#define readw(addr) inw((unsigned long)(addr)) 539#define readw(addr) inw((unsigned long)(addr))
530#define readl(addr) inl((unsigned long)(addr)) 540#define readl(addr) inl((unsigned long)(addr))
531#define writeb(val,addr) outb((val),(unsigned long)(addr)) 541#define writeb(val, addr) outb((val), (unsigned long)(addr))
532#define writew(val,addr) outw((val),(unsigned long)(addr)) 542#define writew(val, addr) outw((val), (unsigned long)(addr))
533#define writel(val,addr) outl((val),(unsigned long)(addr)) 543#define writel(val, addr) outl((val), (unsigned long)(addr))
534 544
535#else 545#else
536 546
537/* write MMIO register, with flush */ 547/* write MMIO register, with flush */
538/* Flush avoids rtl8139 bug w/ posted MMIO writes */ 548/* Flush avoids rtl8139 bug w/ posted MMIO writes */
539#define NETDRV_W8_F(reg, val8) do { writeb ((val8), ioaddr + (reg)); readb (ioaddr + (reg)); } while (0) 549#define NETDRV_W8_F(reg, val8) \
540#define NETDRV_W16_F(reg, val16) do { writew ((val16), ioaddr + (reg)); readw (ioaddr + (reg)); } while (0) 550do { \
541#define NETDRV_W32_F(reg, val32) do { writel ((val32), ioaddr + (reg)); readl (ioaddr + (reg)); } while (0) 551 writeb((val8), ioaddr + (reg)); \
552 readb(ioaddr + (reg)); \
553} while (0)
554#define NETDRV_W16_F(reg, val16) \
555do { \
556 writew((val16), ioaddr + (reg)); \
557 readw(ioaddr + (reg)); \
558} while (0)
559#define NETDRV_W32_F(reg, val32) \
560do { \
561 writel((val32), ioaddr + (reg)); \
562 readl(ioaddr + (reg)); \
563} while (0)
542 564
543 565
544#ifdef MMIO_FLUSH_AUDIT_COMPLETE 566#ifdef MMIO_FLUSH_AUDIT_COMPLETE
545 567
546/* write MMIO register */ 568/* write MMIO register */
547#define NETDRV_W8(reg, val8) writeb ((val8), ioaddr + (reg)) 569#define NETDRV_W8(reg, val8) writeb((val8), ioaddr + (reg))
548#define NETDRV_W16(reg, val16) writew ((val16), ioaddr + (reg)) 570#define NETDRV_W16(reg, val16) writew((val16), ioaddr + (reg))
549#define NETDRV_W32(reg, val32) writel ((val32), ioaddr + (reg)) 571#define NETDRV_W32(reg, val32) writel((val32), ioaddr + (reg))
550 572
551#else 573#else
552 574
@@ -558,9 +580,9 @@ static void netdrv_hw_start (struct net_device *dev);
558#endif /* MMIO_FLUSH_AUDIT_COMPLETE */ 580#endif /* MMIO_FLUSH_AUDIT_COMPLETE */
559 581
560/* read MMIO register */ 582/* read MMIO register */
561#define NETDRV_R8(reg) readb (ioaddr + (reg)) 583#define NETDRV_R8(reg) readb(ioaddr + (reg))
562#define NETDRV_R16(reg) readw (ioaddr + (reg)) 584#define NETDRV_R16(reg) readw(ioaddr + (reg))
563#define NETDRV_R32(reg) ((unsigned long) readl (ioaddr + (reg))) 585#define NETDRV_R32(reg) ((unsigned long) readl(ioaddr + (reg)))
564 586
565#endif /* USE_IO_OPS */ 587#endif /* USE_IO_OPS */
566 588
@@ -570,14 +592,14 @@ static const u16 netdrv_intr_mask =
570 TxErr | TxOK | RxErr | RxOK; 592 TxErr | TxOK | RxErr | RxOK;
571 593
572static const unsigned int netdrv_rx_config = 594static const unsigned int netdrv_rx_config =
573 RxCfgEarlyRxNone | RxCfgRcv32K | RxNoWrap | 595 RxCfgEarlyRxNone | RxCfgRcv32K | RxNoWrap |
574 (RX_FIFO_THRESH << RxCfgFIFOShift) | 596 (RX_FIFO_THRESH << RxCfgFIFOShift) |
575 (RX_DMA_BURST << RxCfgDMAShift); 597 (RX_DMA_BURST << RxCfgDMAShift);
576 598
577 599
578static int __devinit netdrv_init_board (struct pci_dev *pdev, 600static int __devinit netdrv_init_board(struct pci_dev *pdev,
579 struct net_device **dev_out, 601 struct net_device **dev_out,
580 void **ioaddr_out) 602 void **ioaddr_out)
581{ 603{
582 void *ioaddr = NULL; 604 void *ioaddr = NULL;
583 struct net_device *dev; 605 struct net_device *dev;
@@ -587,43 +609,43 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
587 unsigned long mmio_start, mmio_end, mmio_flags, mmio_len; 609 unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
588 u32 tmp; 610 u32 tmp;
589 611
590 DPRINTK ("ENTER\n"); 612 DPRINTK("ENTER\n");
591 613
592 assert (pdev != NULL); 614 assert(pdev != NULL);
593 assert (ioaddr_out != NULL); 615 assert(ioaddr_out != NULL);
594 616
595 *ioaddr_out = NULL; 617 *ioaddr_out = NULL;
596 *dev_out = NULL; 618 *dev_out = NULL;
597 619
598 /* dev zeroed in alloc_etherdev */ 620 /* dev zeroed in alloc_etherdev */
599 dev = alloc_etherdev (sizeof (*tp)); 621 dev = alloc_etherdev(sizeof(*tp));
600 if (dev == NULL) { 622 if (dev == NULL) {
601 dev_err(&pdev->dev, "unable to alloc new ethernet\n"); 623 dev_err(&pdev->dev, "unable to alloc new ethernet\n");
602 DPRINTK ("EXIT, returning -ENOMEM\n"); 624 DPRINTK("EXIT, returning -ENOMEM\n");
603 return -ENOMEM; 625 return -ENOMEM;
604 } 626 }
605 SET_NETDEV_DEV(dev, &pdev->dev); 627 SET_NETDEV_DEV(dev, &pdev->dev);
606 tp = netdev_priv(dev); 628 tp = netdev_priv(dev);
607 629
608 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 630 /* enable device(incl. PCI PM wakeup), and bus-mastering */
609 rc = pci_enable_device (pdev); 631 rc = pci_enable_device(pdev);
610 if (rc) 632 if (rc)
611 goto err_out; 633 goto err_out;
612 634
613 pio_start = pci_resource_start (pdev, 0); 635 pio_start = pci_resource_start(pdev, 0);
614 pio_end = pci_resource_end (pdev, 0); 636 pio_end = pci_resource_end(pdev, 0);
615 pio_flags = pci_resource_flags (pdev, 0); 637 pio_flags = pci_resource_flags(pdev, 0);
616 pio_len = pci_resource_len (pdev, 0); 638 pio_len = pci_resource_len(pdev, 0);
617 639
618 mmio_start = pci_resource_start (pdev, 1); 640 mmio_start = pci_resource_start(pdev, 1);
619 mmio_end = pci_resource_end (pdev, 1); 641 mmio_end = pci_resource_end(pdev, 1);
620 mmio_flags = pci_resource_flags (pdev, 1); 642 mmio_flags = pci_resource_flags(pdev, 1);
621 mmio_len = pci_resource_len (pdev, 1); 643 mmio_len = pci_resource_len(pdev, 1);
622 644
623 /* set this immediately, we need to know before 645 /* set this immediately, we need to know before
624 * we talk to the chip directly */ 646 * we talk to the chip directly */
625 DPRINTK("PIO region size == 0x%02X\n", pio_len); 647 DPRINTK("PIO region size == %#02X\n", pio_len);
626 DPRINTK("MMIO region size == 0x%02lX\n", mmio_len); 648 DPRINTK("MMIO region size == %#02lX\n", mmio_len);
627 649
628 /* make sure PCI base addr 0 is PIO */ 650 /* make sure PCI base addr 0 is PIO */
629 if (!(pio_flags & IORESOURCE_IO)) { 651 if (!(pio_flags & IORESOURCE_IO)) {
@@ -647,17 +669,17 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
647 goto err_out; 669 goto err_out;
648 } 670 }
649 671
650 rc = pci_request_regions (pdev, MODNAME); 672 rc = pci_request_regions(pdev, MODNAME);
651 if (rc) 673 if (rc)
652 goto err_out; 674 goto err_out;
653 675
654 pci_set_master (pdev); 676 pci_set_master(pdev);
655 677
656#ifdef USE_IO_OPS 678#ifdef USE_IO_OPS
657 ioaddr = (void *) pio_start; 679 ioaddr = (void *)pio_start;
658#else 680#else
659 /* ioremap MMIO region */ 681 /* ioremap MMIO region */
660 ioaddr = ioremap (mmio_start, mmio_len); 682 ioaddr = ioremap(mmio_start, mmio_len);
661 if (ioaddr == NULL) { 683 if (ioaddr == NULL) {
662 dev_err(&pdev->dev, "cannot remap MMIO, aborting\n"); 684 dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
663 rc = -EIO; 685 rc = -EIO;
@@ -666,52 +688,50 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
666#endif /* USE_IO_OPS */ 688#endif /* USE_IO_OPS */
667 689
668 /* Soft reset the chip. */ 690 /* Soft reset the chip. */
669 NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear) | CmdReset); 691 NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) | CmdReset);
670 692
671 /* Check that the chip has finished the reset. */ 693 /* Check that the chip has finished the reset. */
672 for (i = 1000; i > 0; i--) 694 for (i = 1000; i > 0; i--)
673 if ((NETDRV_R8 (ChipCmd) & CmdReset) == 0) 695 if ((NETDRV_R8(ChipCmd) & CmdReset) == 0)
674 break; 696 break;
675 else 697 else
676 udelay (10); 698 udelay(10);
677 699
678 /* Bring the chip out of low-power mode. */ 700 /* Bring the chip out of low-power mode. */
679 /* <insert device-specific code here> */ 701 /* <insert device-specific code here> */
680 702
681#ifndef USE_IO_OPS 703#ifndef USE_IO_OPS
682 /* sanity checks -- ensure PIO and MMIO registers agree */ 704 /* sanity checks -- ensure PIO and MMIO registers agree */
683 assert (inb (pio_start+Config0) == readb (ioaddr+Config0)); 705 assert(inb(pio_start+Config0) == readb(ioaddr+Config0));
684 assert (inb (pio_start+Config1) == readb (ioaddr+Config1)); 706 assert(inb(pio_start+Config1) == readb(ioaddr+Config1));
685 assert (inb (pio_start+TxConfig) == readb (ioaddr+TxConfig)); 707 assert(inb(pio_start+TxConfig) == readb(ioaddr+TxConfig));
686 assert (inb (pio_start+RxConfig) == readb (ioaddr+RxConfig)); 708 assert(inb(pio_start+RxConfig) == readb(ioaddr+RxConfig));
687#endif /* !USE_IO_OPS */ 709#endif /* !USE_IO_OPS */
688 710
689 /* identify chip attached to board */ 711 /* identify chip attached to board */
690 tmp = NETDRV_R8 (ChipVersion); 712 tmp = NETDRV_R8(ChipVersion);
691 for (i = ARRAY_SIZE (rtl_chip_info) - 1; i >= 0; i--) 713 for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--)
692 if (tmp == rtl_chip_info[i].version) { 714 if (tmp == rtl_chip_info[i].version) {
693 tp->chipset = i; 715 tp->chipset = i;
694 goto match; 716 goto match;
695 } 717 }
696 718
697 /* if unknown chip, assume array element #0, original RTL-8139 in this case */ 719 /* if unknown chip, assume array element #0, original RTL-8139 in this case */
698 dev_printk (KERN_DEBUG, &pdev->dev, 720 dev_printk(KERN_DEBUG, &pdev->dev,
699 "unknown chip version, assuming RTL-8139\n"); 721 "unknown chip version, assuming RTL-8139\n");
700 dev_printk (KERN_DEBUG, &pdev->dev, "TxConfig = 0x%lx\n", 722 dev_printk(KERN_DEBUG, &pdev->dev, "TxConfig = %#lx\n",
701 NETDRV_R32 (TxConfig)); 723 NETDRV_R32(TxConfig));
702 tp->chipset = 0; 724 tp->chipset = 0;
703 725
704match: 726match:
705 DPRINTK ("chipset id (%d) == index %d, '%s'\n", 727 DPRINTK("chipset id(%d) == index %d, '%s'\n",
706 tmp, 728 tmp, tp->chipset, rtl_chip_info[tp->chipset].name);
707 tp->chipset,
708 rtl_chip_info[tp->chipset].name);
709 729
710 rc = register_netdev (dev); 730 rc = register_netdev(dev);
711 if (rc) 731 if (rc)
712 goto err_out_unmap; 732 goto err_out_unmap;
713 733
714 DPRINTK ("EXIT, returning 0\n"); 734 DPRINTK("EXIT, returning 0\n");
715 *ioaddr_out = ioaddr; 735 *ioaddr_out = ioaddr;
716 *dev_out = dev; 736 *dev_out = dev;
717 return 0; 737 return 0;
@@ -721,10 +741,10 @@ err_out_unmap:
721 iounmap(ioaddr); 741 iounmap(ioaddr);
722err_out_free_res: 742err_out_free_res:
723#endif 743#endif
724 pci_release_regions (pdev); 744 pci_release_regions(pdev);
725err_out: 745err_out:
726 free_netdev (dev); 746 free_netdev(dev);
727 DPRINTK ("EXIT, returning %d\n", rc); 747 DPRINTK("EXIT, returning %d\n", rc);
728 return rc; 748 return rc;
729} 749}
730 750
@@ -740,8 +760,8 @@ static const struct net_device_ops netdrv_netdev_ops = {
740 .ndo_set_mac_address = eth_mac_addr, 760 .ndo_set_mac_address = eth_mac_addr,
741}; 761};
742 762
743static int __devinit netdrv_init_one (struct pci_dev *pdev, 763static int __devinit netdrv_init_one(struct pci_dev *pdev,
744 const struct pci_device_id *ent) 764 const struct pci_device_id *ent)
745{ 765{
746 struct net_device *dev = NULL; 766 struct net_device *dev = NULL;
747 struct netdrv_private *tp; 767 struct netdrv_private *tp;
@@ -756,29 +776,29 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
756 printk(version); 776 printk(version);
757#endif 777#endif
758 778
759 DPRINTK ("ENTER\n"); 779 DPRINTK("ENTER\n");
760 780
761 assert (pdev != NULL); 781 assert(pdev != NULL);
762 assert (ent != NULL); 782 assert(ent != NULL);
763 783
764 board_idx++; 784 board_idx++;
765 785
766 i = netdrv_init_board (pdev, &dev, &ioaddr); 786 i = netdrv_init_board(pdev, &dev, &ioaddr);
767 if (i < 0) { 787 if (i < 0) {
768 DPRINTK ("EXIT, returning %d\n", i); 788 DPRINTK("EXIT, returning %d\n", i);
769 return i; 789 return i;
770 } 790 }
771 791
772 tp = netdev_priv(dev); 792 tp = netdev_priv(dev);
773 793
774 assert (ioaddr != NULL); 794 assert(ioaddr != NULL);
775 assert (dev != NULL); 795 assert(dev != NULL);
776 assert (tp != NULL); 796 assert(tp != NULL);
777 797
778 addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6; 798 addr_len = read_eeprom(ioaddr, 0, 8) == 0x8129 ? 8 : 6;
779 for (i = 0; i < 3; i++) 799 for (i = 0; i < 3; i++)
780 ((u16 *) (dev->dev_addr))[i] = 800 ((u16 *)(dev->dev_addr))[i] =
781 le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len)); 801 le16_to_cpu(read_eeprom(ioaddr, i + 7, addr_len));
782 802
783 dev->netdev_ops = &netdrv_netdev_ops; 803 dev->netdev_ops = &netdrv_netdev_ops;
784 dev->watchdog_timeo = TX_TIMEOUT; 804 dev->watchdog_timeo = TX_TIMEOUT;
@@ -791,7 +811,7 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
791 811
792 /* note: tp->chipset set in netdrv_init_board */ 812 /* note: tp->chipset set in netdrv_init_board */
793 tp->drv_flags = PCI_COMMAND_IO | PCI_COMMAND_MEMORY | 813 tp->drv_flags = PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
794 PCI_COMMAND_MASTER | NETDRV_CAPS; 814 PCI_COMMAND_MASTER | NETDRV_CAPS;
795 tp->pci_dev = pdev; 815 tp->pci_dev = pdev;
796 tp->board = ent->driver_data; 816 tp->board = ent->driver_data;
797 tp->mmio_addr = ioaddr; 817 tp->mmio_addr = ioaddr;
@@ -801,18 +821,15 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
801 821
802 tp->phys[0] = 32; 822 tp->phys[0] = 32;
803 823
804 printk (KERN_INFO "%s: %s at 0x%lx, %pM IRQ %d\n", 824 netdev_info(dev, "%s at %#lx, %pM IRQ %d\n",
805 dev->name, 825 board_info[ent->driver_data].name,
806 board_info[ent->driver_data].name, 826 dev->base_addr, dev->dev_addr, dev->irq);
807 dev->base_addr,
808 dev->dev_addr,
809 dev->irq);
810 827
811 printk (KERN_DEBUG "%s: Identified 8139 chip type '%s'\n", 828 netdev_printk(KERN_DEBUG, dev, "Identified 8139 chip type '%s'\n",
812 dev->name, rtl_chip_info[tp->chipset].name); 829 rtl_chip_info[tp->chipset].name);
813 830
814 /* Put the chip into low-power mode. */ 831 /* Put the chip into low-power mode. */
815 NETDRV_W8_F (Cfg9346, Cfg9346_Unlock); 832 NETDRV_W8_F(Cfg9346, Cfg9346_Unlock);
816 833
817 /* The lower four bits are the media type. */ 834 /* The lower four bits are the media type. */
818 option = (board_idx > 7) ? 0 : media[board_idx]; 835 option = (board_idx > 7) ? 0 : media[board_idx];
@@ -824,45 +841,43 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
824 } 841 }
825 842
826 if (tp->full_duplex) { 843 if (tp->full_duplex) {
827 printk (KERN_INFO 844 netdev_info(dev, "Media type forced to Full Duplex\n");
828 "%s: Media type forced to Full Duplex.\n", 845 mdio_write(dev, tp->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
829 dev->name);
830 mdio_write (dev, tp->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
831 tp->duplex_lock = 1; 846 tp->duplex_lock = 1;
832 } 847 }
833 848
834 DPRINTK ("EXIT - returning 0\n"); 849 DPRINTK("EXIT - returning 0\n");
835 return 0; 850 return 0;
836} 851}
837 852
838 853
839static void __devexit netdrv_remove_one (struct pci_dev *pdev) 854static void __devexit netdrv_remove_one(struct pci_dev *pdev)
840{ 855{
841 struct net_device *dev = pci_get_drvdata (pdev); 856 struct net_device *dev = pci_get_drvdata(pdev);
842 struct netdrv_private *np; 857 struct netdrv_private *np;
843 858
844 DPRINTK ("ENTER\n"); 859 DPRINTK("ENTER\n");
845 860
846 assert (dev != NULL); 861 assert(dev != NULL);
847 862
848 np = netdev_priv(dev); 863 np = netdev_priv(dev);
849 assert (np != NULL); 864 assert(np != NULL);
850 865
851 unregister_netdev (dev); 866 unregister_netdev(dev);
852 867
853#ifndef USE_IO_OPS 868#ifndef USE_IO_OPS
854 iounmap (np->mmio_addr); 869 iounmap(np->mmio_addr);
855#endif /* !USE_IO_OPS */ 870#endif /* !USE_IO_OPS */
856 871
857 pci_release_regions (pdev); 872 pci_release_regions(pdev);
858 873
859 free_netdev (dev); 874 free_netdev(dev);
860 875
861 pci_set_drvdata (pdev, NULL); 876 pci_set_drvdata(pdev, NULL);
862 877
863 pci_disable_device (pdev); 878 pci_disable_device(pdev);
864 879
865 DPRINTK ("EXIT\n"); 880 DPRINTK("EXIT\n");
866} 881}
867 882
868 883
@@ -870,63 +885,63 @@ static void __devexit netdrv_remove_one (struct pci_dev *pdev)
870 885
871/* EEPROM_Ctrl bits. */ 886/* EEPROM_Ctrl bits. */
872#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */ 887#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
873#define EE_CS 0x08 /* EEPROM chip select. */ 888#define EE_CS 0x08 /* EEPROM chip select. */
874#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */ 889#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
875#define EE_WRITE_0 0x00 890#define EE_WRITE_0 0x00
876#define EE_WRITE_1 0x02 891#define EE_WRITE_1 0x02
877#define EE_DATA_READ 0x01 /* EEPROM chip data out. */ 892#define EE_DATA_READ 0x01 /* EEPROM chip data out. */
878#define EE_ENB (0x80 | EE_CS) 893#define EE_ENB (0x80 | EE_CS)
879 894
880/* Delay between EEPROM clock transitions. 895/* Delay between EEPROM clock transitions.
881 No extra delay is needed with 33Mhz PCI, but 66Mhz may change this. 896 No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
882 */ 897*/
883 898
884#define eeprom_delay() readl(ee_addr) 899#define eeprom_delay() readl(ee_addr)
885 900
886/* The EEPROM commands include the alway-set leading bit. */ 901/* The EEPROM commands include the alway-set leading bit. */
887#define EE_WRITE_CMD (5) 902#define EE_WRITE_CMD (5)
888#define EE_READ_CMD (6) 903#define EE_READ_CMD (6)
889#define EE_ERASE_CMD (7) 904#define EE_ERASE_CMD (7)
890 905
891static int __devinit read_eeprom (void *ioaddr, int location, int addr_len) 906static int __devinit read_eeprom(void *ioaddr, int location, int addr_len)
892{ 907{
893 int i; 908 int i;
894 unsigned retval = 0; 909 unsigned retval = 0;
895 void *ee_addr = ioaddr + Cfg9346; 910 void *ee_addr = ioaddr + Cfg9346;
896 int read_cmd = location | (EE_READ_CMD << addr_len); 911 int read_cmd = location | (EE_READ_CMD << addr_len);
897 912
898 DPRINTK ("ENTER\n"); 913 DPRINTK("ENTER\n");
899 914
900 writeb (EE_ENB & ~EE_CS, ee_addr); 915 writeb(EE_ENB & ~EE_CS, ee_addr);
901 writeb (EE_ENB, ee_addr); 916 writeb(EE_ENB, ee_addr);
902 eeprom_delay (); 917 eeprom_delay();
903 918
904 /* Shift the read command bits out. */ 919 /* Shift the read command bits out. */
905 for (i = 4 + addr_len; i >= 0; i--) { 920 for (i = 4 + addr_len; i >= 0; i--) {
906 int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0; 921 int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
907 writeb (EE_ENB | dataval, ee_addr); 922 writeb(EE_ENB | dataval, ee_addr);
908 eeprom_delay (); 923 eeprom_delay();
909 writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr); 924 writeb(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
910 eeprom_delay (); 925 eeprom_delay();
911 } 926 }
912 writeb (EE_ENB, ee_addr); 927 writeb(EE_ENB, ee_addr);
913 eeprom_delay (); 928 eeprom_delay();
914 929
915 for (i = 16; i > 0; i--) { 930 for (i = 16; i > 0; i--) {
916 writeb (EE_ENB | EE_SHIFT_CLK, ee_addr); 931 writeb(EE_ENB | EE_SHIFT_CLK, ee_addr);
917 eeprom_delay (); 932 eeprom_delay();
918 retval = 933 retval =
919 (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 : 934 (retval << 1) | ((readb(ee_addr) & EE_DATA_READ) ? 1 :
920 0); 935 0);
921 writeb (EE_ENB, ee_addr); 936 writeb(EE_ENB, ee_addr);
922 eeprom_delay (); 937 eeprom_delay();
923 } 938 }
924 939
925 /* Terminate the EEPROM access. */ 940 /* Terminate the EEPROM access. */
926 writeb (~EE_CS, ee_addr); 941 writeb(~EE_CS, ee_addr);
927 eeprom_delay (); 942 eeprom_delay();
928 943
929 DPRINTK ("EXIT - returning %d\n", retval); 944 DPRINTK("EXIT - returning %d\n", retval);
930 return retval; 945 return retval;
931} 946}
932 947
@@ -936,12 +951,12 @@ static int __devinit read_eeprom (void *ioaddr, int location, int addr_len)
936 The maximum data clock rate is 2.5 Mhz. The minimum timing is usually 951 The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
937 met by back-to-back PCI I/O cycles, but we insert a delay to avoid 952 met by back-to-back PCI I/O cycles, but we insert a delay to avoid
938 "overclocking" issues. */ 953 "overclocking" issues. */
939#define MDIO_DIR 0x80 954#define MDIO_DIR 0x80
940#define MDIO_DATA_OUT 0x04 955#define MDIO_DATA_OUT 0x04
941#define MDIO_DATA_IN 0x02 956#define MDIO_DATA_IN 0x02
942#define MDIO_CLK 0x01 957#define MDIO_CLK 0x01
943#define MDIO_WRITE0 (MDIO_DIR) 958#define MDIO_WRITE0 (MDIO_DIR)
944#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT) 959#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
945 960
946#define mdio_delay() readb(mdio_addr) 961#define mdio_delay() readb(mdio_addr)
947 962
@@ -959,24 +974,24 @@ static char mii_2_8139_map[8] = {
959 974
960 975
961/* Syncronize the MII management interface by shifting 32 one bits out. */ 976/* Syncronize the MII management interface by shifting 32 one bits out. */
962static void mdio_sync (void *mdio_addr) 977static void mdio_sync(void *mdio_addr)
963{ 978{
964 int i; 979 int i;
965 980
966 DPRINTK ("ENTER\n"); 981 DPRINTK("ENTER\n");
967 982
968 for (i = 32; i >= 0; i--) { 983 for (i = 32; i >= 0; i--) {
969 writeb (MDIO_WRITE1, mdio_addr); 984 writeb(MDIO_WRITE1, mdio_addr);
970 mdio_delay (); 985 mdio_delay();
971 writeb (MDIO_WRITE1 | MDIO_CLK, mdio_addr); 986 writeb(MDIO_WRITE1 | MDIO_CLK, mdio_addr);
972 mdio_delay (); 987 mdio_delay();
973 } 988 }
974 989
975 DPRINTK ("EXIT\n"); 990 DPRINTK("EXIT\n");
976} 991}
977 992
978 993
979static int mdio_read (struct net_device *dev, int phy_id, int location) 994static int mdio_read(struct net_device *dev, int phy_id, int location)
980{ 995{
981 struct netdrv_private *tp = netdev_priv(dev); 996 struct netdrv_private *tp = netdev_priv(dev);
982 void *mdio_addr = tp->mmio_addr + Config4; 997 void *mdio_addr = tp->mmio_addr + Config4;
@@ -984,97 +999,94 @@ static int mdio_read (struct net_device *dev, int phy_id, int location)
984 int retval = 0; 999 int retval = 0;
985 int i; 1000 int i;
986 1001
987 DPRINTK ("ENTER\n"); 1002 DPRINTK("ENTER\n");
988 1003
989 if (phy_id > 31) { /* Really a 8139. Use internal registers. */ 1004 if (phy_id > 31) { /* Really a 8139. Use internal registers. */
990 DPRINTK ("EXIT after directly using 8139 internal regs\n"); 1005 DPRINTK("EXIT after directly using 8139 internal regs\n");
991 return location < 8 && mii_2_8139_map[location] ? 1006 return location < 8 && mii_2_8139_map[location] ?
992 readw (tp->mmio_addr + mii_2_8139_map[location]) : 0; 1007 readw(tp->mmio_addr + mii_2_8139_map[location]) : 0;
993 } 1008 }
994 mdio_sync (mdio_addr); 1009 mdio_sync(mdio_addr);
995 /* Shift the read command bits out. */ 1010 /* Shift the read command bits out. */
996 for (i = 15; i >= 0; i--) { 1011 for (i = 15; i >= 0; i--) {
997 int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0; 1012 int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;
998 1013
999 writeb (MDIO_DIR | dataval, mdio_addr); 1014 writeb(MDIO_DIR | dataval, mdio_addr);
1000 mdio_delay (); 1015 mdio_delay();
1001 writeb (MDIO_DIR | dataval | MDIO_CLK, mdio_addr); 1016 writeb(MDIO_DIR | dataval | MDIO_CLK, mdio_addr);
1002 mdio_delay (); 1017 mdio_delay();
1003 } 1018 }
1004 1019
1005 /* Read the two transition, 16 data, and wire-idle bits. */ 1020 /* Read the two transition, 16 data, and wire-idle bits. */
1006 for (i = 19; i > 0; i--) { 1021 for (i = 19; i > 0; i--) {
1007 writeb (0, mdio_addr); 1022 writeb(0, mdio_addr);
1008 mdio_delay (); 1023 mdio_delay();
1009 retval = 1024 retval = ((retval << 1) | ((readb(mdio_addr) & MDIO_DATA_IN))
1010 (retval << 1) | ((readb (mdio_addr) & MDIO_DATA_IN) ? 1 1025 ? 1 : 0);
1011 : 0); 1026 writeb(MDIO_CLK, mdio_addr);
1012 writeb (MDIO_CLK, mdio_addr); 1027 mdio_delay();
1013 mdio_delay ();
1014 } 1028 }
1015 1029
1016 DPRINTK ("EXIT, returning %d\n", (retval >> 1) & 0xffff); 1030 DPRINTK("EXIT, returning %d\n", (retval >> 1) & 0xffff);
1017 return (retval >> 1) & 0xffff; 1031 return (retval >> 1) & 0xffff;
1018} 1032}
1019 1033
1020 1034
1021static void mdio_write (struct net_device *dev, int phy_id, int location, 1035static void mdio_write(struct net_device *dev, int phy_id, int location,
1022 int value) 1036 int value)
1023{ 1037{
1024 struct netdrv_private *tp = netdev_priv(dev); 1038 struct netdrv_private *tp = netdev_priv(dev);
1025 void *mdio_addr = tp->mmio_addr + Config4; 1039 void *mdio_addr = tp->mmio_addr + Config4;
1026 int mii_cmd = 1040 int mii_cmd =
1027 (0x5002 << 16) | (phy_id << 23) | (location << 18) | value; 1041 (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
1028 int i; 1042 int i;
1029 1043
1030 DPRINTK ("ENTER\n"); 1044 DPRINTK("ENTER\n");
1031 1045
1032 if (phy_id > 31) { /* Really a 8139. Use internal registers. */ 1046 if (phy_id > 31) { /* Really a 8139. Use internal registers. */
1033 if (location < 8 && mii_2_8139_map[location]) { 1047 if (location < 8 && mii_2_8139_map[location]) {
1034 writew (value, 1048 writew(value,
1035 tp->mmio_addr + mii_2_8139_map[location]); 1049 tp->mmio_addr + mii_2_8139_map[location]);
1036 readw (tp->mmio_addr + mii_2_8139_map[location]); 1050 readw(tp->mmio_addr + mii_2_8139_map[location]);
1037 } 1051 }
1038 DPRINTK ("EXIT after directly using 8139 internal regs\n"); 1052 DPRINTK("EXIT after directly using 8139 internal regs\n");
1039 return; 1053 return;
1040 } 1054 }
1041 mdio_sync (mdio_addr); 1055 mdio_sync(mdio_addr);
1042 1056
1043 /* Shift the command bits out. */ 1057 /* Shift the command bits out. */
1044 for (i = 31; i >= 0; i--) { 1058 for (i = 31; i >= 0; i--) {
1045 int dataval = 1059 int dataval =
1046 (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; 1060 (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
1047 writeb (dataval, mdio_addr); 1061 writeb(dataval, mdio_addr);
1048 mdio_delay (); 1062 mdio_delay();
1049 writeb (dataval | MDIO_CLK, mdio_addr); 1063 writeb(dataval | MDIO_CLK, mdio_addr);
1050 mdio_delay (); 1064 mdio_delay();
1051 } 1065 }
1052 1066
1053 /* Clear out extra bits. */ 1067 /* Clear out extra bits. */
1054 for (i = 2; i > 0; i--) { 1068 for (i = 2; i > 0; i--) {
1055 writeb (0, mdio_addr); 1069 writeb(0, mdio_addr);
1056 mdio_delay (); 1070 mdio_delay();
1057 writeb (MDIO_CLK, mdio_addr); 1071 writeb(MDIO_CLK, mdio_addr);
1058 mdio_delay (); 1072 mdio_delay();
1059 } 1073 }
1060 1074
1061 DPRINTK ("EXIT\n"); 1075 DPRINTK("EXIT\n");
1062} 1076}
1063 1077
1064 1078
1065static int netdrv_open (struct net_device *dev) 1079static int netdrv_open(struct net_device *dev)
1066{ 1080{
1067 struct netdrv_private *tp = netdev_priv(dev); 1081 struct netdrv_private *tp = netdev_priv(dev);
1068 int retval; 1082 int retval;
1069#ifdef NETDRV_DEBUG
1070 void *ioaddr = tp->mmio_addr; 1083 void *ioaddr = tp->mmio_addr;
1071#endif
1072 1084
1073 DPRINTK ("ENTER\n"); 1085 DPRINTK("ENTER\n");
1074 1086
1075 retval = request_irq (dev->irq, netdrv_interrupt, IRQF_SHARED, dev->name, dev); 1087 retval = request_irq(dev->irq, netdrv_interrupt, IRQF_SHARED, dev->name, dev);
1076 if (retval) { 1088 if (retval) {
1077 DPRINTK ("EXIT, returning %d\n", retval); 1089 DPRINTK("EXIT, returning %d\n", retval);
1078 return retval; 1090 return retval;
1079 } 1091 }
1080 1092
@@ -1092,7 +1104,7 @@ static int netdrv_open (struct net_device *dev)
1092 pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN, 1104 pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
1093 tp->rx_ring, tp->rx_ring_dma); 1105 tp->rx_ring, tp->rx_ring_dma);
1094 1106
1095 DPRINTK ("EXIT, returning -ENOMEM\n"); 1107 DPRINTK("EXIT, returning -ENOMEM\n");
1096 return -ENOMEM; 1108 return -ENOMEM;
1097 1109
1098 } 1110 }
@@ -1100,109 +1112,108 @@ static int netdrv_open (struct net_device *dev)
1100 tp->full_duplex = tp->duplex_lock; 1112 tp->full_duplex = tp->duplex_lock;
1101 tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000; 1113 tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000;
1102 1114
1103 netdrv_init_ring (dev); 1115 netdrv_init_ring(dev);
1104 netdrv_hw_start (dev); 1116 netdrv_hw_start(dev);
1105 1117
1106 DPRINTK ("%s: netdrv_open() ioaddr %#lx IRQ %d" 1118 netdev_dbg(dev, "ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n",
1107 " GP Pins %2.2x %s-duplex.\n", 1119 (unsigned long long)pci_resource_start(tp->pci_dev, 1),
1108 dev->name, pci_resource_start (tp->pci_dev, 1), 1120 dev->irq, NETDRV_R8(MediaStatus),
1109 dev->irq, NETDRV_R8 (MediaStatus), 1121 tp->full_duplex ? "full" : "half");
1110 tp->full_duplex ? "full" : "half");
1111 1122
1112 /* Set the timer to switch to check for link beat and perhaps switch 1123 /* Set the timer to switch to check for link beat and perhaps switch
1113 to an alternate media type. */ 1124 to an alternate media type. */
1114 init_timer (&tp->timer); 1125 init_timer(&tp->timer);
1115 tp->timer.expires = jiffies + 3 * HZ; 1126 tp->timer.expires = jiffies + 3 * HZ;
1116 tp->timer.data = (unsigned long) dev; 1127 tp->timer.data = (unsigned long) dev;
1117 tp->timer.function = &netdrv_timer; 1128 tp->timer.function = &netdrv_timer;
1118 add_timer (&tp->timer); 1129 add_timer(&tp->timer);
1119 1130
1120 DPRINTK ("EXIT, returning 0\n"); 1131 DPRINTK("EXIT, returning 0\n");
1121 return 0; 1132 return 0;
1122} 1133}
1123 1134
1124 1135
1125/* Start the hardware at open or resume. */ 1136/* Start the hardware at open or resume. */
1126static void netdrv_hw_start (struct net_device *dev) 1137static void netdrv_hw_start(struct net_device *dev)
1127{ 1138{
1128 struct netdrv_private *tp = netdev_priv(dev); 1139 struct netdrv_private *tp = netdev_priv(dev);
1129 void *ioaddr = tp->mmio_addr; 1140 void *ioaddr = tp->mmio_addr;
1130 u32 i; 1141 u32 i;
1131 1142
1132 DPRINTK ("ENTER\n"); 1143 DPRINTK("ENTER\n");
1133 1144
1134 /* Soft reset the chip. */ 1145 /* Soft reset the chip. */
1135 NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear) | CmdReset); 1146 NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) | CmdReset);
1136 udelay (100); 1147 udelay(100);
1137 1148
1138 /* Check that the chip has finished the reset. */ 1149 /* Check that the chip has finished the reset. */
1139 for (i = 1000; i > 0; i--) 1150 for (i = 1000; i > 0; i--)
1140 if ((NETDRV_R8 (ChipCmd) & CmdReset) == 0) 1151 if ((NETDRV_R8(ChipCmd) & CmdReset) == 0)
1141 break; 1152 break;
1142 1153
1143 /* Restore our idea of the MAC address. */ 1154 /* Restore our idea of the MAC address. */
1144 NETDRV_W32_F (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0))); 1155 NETDRV_W32_F(MAC0 + 0, cpu_to_le32(*(u32 *)(dev->dev_addr + 0)));
1145 NETDRV_W32_F (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4))); 1156 NETDRV_W32_F(MAC0 + 4, cpu_to_le32(*(u32 *)(dev->dev_addr + 4)));
1146 1157
1147 /* Must enable Tx/Rx before setting transfer thresholds! */ 1158 /* Must enable Tx/Rx before setting transfer thresholds! */
1148 NETDRV_W8_F (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear) | 1159 NETDRV_W8_F(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) |
1149 CmdRxEnb | CmdTxEnb); 1160 CmdRxEnb | CmdTxEnb);
1150 1161
1151 i = netdrv_rx_config | 1162 i = netdrv_rx_config |
1152 (NETDRV_R32 (RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask); 1163 (NETDRV_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
1153 NETDRV_W32_F (RxConfig, i); 1164 NETDRV_W32_F(RxConfig, i);
1154 1165
1155 /* Check this value: the documentation for IFG contradicts ifself. */ 1166 /* Check this value: the documentation for IFG contradicts ifself. */
1156 NETDRV_W32 (TxConfig, (TX_DMA_BURST << TxDMAShift)); 1167 NETDRV_W32(TxConfig, (TX_DMA_BURST << TxDMAShift));
1157 1168
1158 /* unlock Config[01234] and BMCR register writes */ 1169 /* unlock Config[01234] and BMCR register writes */
1159 NETDRV_W8_F (Cfg9346, Cfg9346_Unlock); 1170 NETDRV_W8_F(Cfg9346, Cfg9346_Unlock);
1160 udelay (10); 1171 udelay(10);
1161 1172
1162 tp->cur_rx = 0; 1173 tp->cur_rx = 0;
1163 1174
1164 /* Lock Config[01234] and BMCR register writes */ 1175 /* Lock Config[01234] and BMCR register writes */
1165 NETDRV_W8_F (Cfg9346, Cfg9346_Lock); 1176 NETDRV_W8_F(Cfg9346, Cfg9346_Lock);
1166 udelay (10); 1177 udelay(10);
1167 1178
1168 /* init Rx ring buffer DMA address */ 1179 /* init Rx ring buffer DMA address */
1169 NETDRV_W32_F (RxBuf, tp->rx_ring_dma); 1180 NETDRV_W32_F(RxBuf, tp->rx_ring_dma);
1170 1181
1171 /* init Tx buffer DMA addresses */ 1182 /* init Tx buffer DMA addresses */
1172 for (i = 0; i < NUM_TX_DESC; i++) 1183 for (i = 0; i < NUM_TX_DESC; i++)
1173 NETDRV_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs)); 1184 NETDRV_W32_F(TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));
1174 1185
1175 NETDRV_W32_F (RxMissed, 0); 1186 NETDRV_W32_F(RxMissed, 0);
1176 1187
1177 netdrv_set_rx_mode (dev); 1188 netdrv_set_rx_mode(dev);
1178 1189
1179 /* no early-rx interrupts */ 1190 /* no early-rx interrupts */
1180 NETDRV_W16 (MultiIntr, NETDRV_R16 (MultiIntr) & MultiIntrClear); 1191 NETDRV_W16(MultiIntr, NETDRV_R16(MultiIntr) & MultiIntrClear);
1181 1192
1182 /* make sure RxTx has started */ 1193 /* make sure RxTx has started */
1183 NETDRV_W8_F (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear) | 1194 NETDRV_W8_F(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) |
1184 CmdRxEnb | CmdTxEnb); 1195 CmdRxEnb | CmdTxEnb);
1185 1196
1186 /* Enable all known interrupts by setting the interrupt mask. */ 1197 /* Enable all known interrupts by setting the interrupt mask. */
1187 NETDRV_W16_F (IntrMask, netdrv_intr_mask); 1198 NETDRV_W16_F(IntrMask, netdrv_intr_mask);
1188 1199
1189 netif_start_queue (dev); 1200 netif_start_queue(dev);
1190 1201
1191 DPRINTK ("EXIT\n"); 1202 DPRINTK("EXIT\n");
1192} 1203}
1193 1204
1194 1205
1195/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ 1206/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1196static void netdrv_init_ring (struct net_device *dev) 1207static void netdrv_init_ring(struct net_device *dev)
1197{ 1208{
1198 struct netdrv_private *tp = netdev_priv(dev); 1209 struct netdrv_private *tp = netdev_priv(dev);
1199 int i; 1210 int i;
1200 1211
1201 DPRINTK ("ENTER\n"); 1212 DPRINTK("ENTER\n");
1202 1213
1203 tp->cur_rx = 0; 1214 tp->cur_rx = 0;
1204 atomic_set (&tp->cur_tx, 0); 1215 atomic_set(&tp->cur_tx, 0);
1205 atomic_set (&tp->dirty_tx, 0); 1216 atomic_set(&tp->dirty_tx, 0);
1206 1217
1207 for (i = 0; i < NUM_TX_DESC; i++) { 1218 for (i = 0; i < NUM_TX_DESC; i++) {
1208 tp->tx_info[i].skb = NULL; 1219 tp->tx_info[i].skb = NULL;
@@ -1210,11 +1221,11 @@ static void netdrv_init_ring (struct net_device *dev)
1210 tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE]; 1221 tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE];
1211 } 1222 }
1212 1223
1213 DPRINTK ("EXIT\n"); 1224 DPRINTK("EXIT\n");
1214} 1225}
1215 1226
1216 1227
1217static void netdrv_timer (unsigned long data) 1228static void netdrv_timer(unsigned long data)
1218{ 1229{
1219 struct net_device *dev = (struct net_device *) data; 1230 struct net_device *dev = (struct net_device *) data;
1220 struct netdrv_private *tp = netdev_priv(dev); 1231 struct netdrv_private *tp = netdev_priv(dev);
@@ -1222,58 +1233,54 @@ static void netdrv_timer (unsigned long data)
1222 int next_tick = 60 * HZ; 1233 int next_tick = 60 * HZ;
1223 int mii_lpa; 1234 int mii_lpa;
1224 1235
1225 mii_lpa = mdio_read (dev, tp->phys[0], MII_LPA); 1236 mii_lpa = mdio_read(dev, tp->phys[0], MII_LPA);
1226 1237
1227 if (!tp->duplex_lock && mii_lpa != 0xffff) { 1238 if (!tp->duplex_lock && mii_lpa != 0xffff) {
1228 int duplex = ((mii_lpa & LPA_100FULL) || 1239 int duplex = ((mii_lpa & LPA_100FULL) ||
1229 (mii_lpa & 0x01C0) == 0x0040); 1240 (mii_lpa & 0x01C0) == 0x0040);
1230 if (tp->full_duplex != duplex) { 1241 if (tp->full_duplex != duplex) {
1231 tp->full_duplex = duplex; 1242 tp->full_duplex = duplex;
1232 printk (KERN_INFO 1243 netdev_info(dev, "Setting %s-duplex based on MII #%d link partner ability of %04x\n",
1233 "%s: Setting %s-duplex based on MII #%d link" 1244 tp->full_duplex ? "full" : "half",
1234 " partner ability of %4.4x.\n", dev->name, 1245 tp->phys[0], mii_lpa);
1235 tp->full_duplex ? "full" : "half", 1246 NETDRV_W8(Cfg9346, Cfg9346_Unlock);
1236 tp->phys[0], mii_lpa); 1247 NETDRV_W8(Config1, tp->full_duplex ? 0x60 : 0x20);
1237 NETDRV_W8 (Cfg9346, Cfg9346_Unlock); 1248 NETDRV_W8(Cfg9346, Cfg9346_Lock);
1238 NETDRV_W8 (Config1, tp->full_duplex ? 0x60 : 0x20);
1239 NETDRV_W8 (Cfg9346, Cfg9346_Lock);
1240 } 1249 }
1241 } 1250 }
1242 1251
1243 DPRINTK ("%s: Media selection tick, Link partner %4.4x.\n", 1252 netdev_dbg(dev, "Media selection tick, Link partner %04x\n",
1244 dev->name, NETDRV_R16 (NWayLPAR)); 1253 NETDRV_R16(NWayLPAR));
1245 DPRINTK ("%s: Other registers are IntMask %4.4x IntStatus %4.4x" 1254 netdev_dbg(dev, "Other registers are IntMask %04x IntStatus %04x RxStatus %04lx\n",
1246 " RxStatus %4.4x.\n", dev->name, 1255 NETDRV_R16(IntrMask),
1247 NETDRV_R16 (IntrMask), 1256 NETDRV_R16(IntrStatus),
1248 NETDRV_R16 (IntrStatus), 1257 NETDRV_R32(RxEarlyStatus));
1249 NETDRV_R32 (RxEarlyStatus)); 1258 netdev_dbg(dev, "Chip config %02x %02x\n",
1250 DPRINTK ("%s: Chip config %2.2x %2.2x.\n", 1259 NETDRV_R8(Config0), NETDRV_R8(Config1));
1251 dev->name, NETDRV_R8 (Config0),
1252 NETDRV_R8 (Config1));
1253 1260
1254 tp->timer.expires = jiffies + next_tick; 1261 tp->timer.expires = jiffies + next_tick;
1255 add_timer (&tp->timer); 1262 add_timer(&tp->timer);
1256} 1263}
1257 1264
1258 1265
1259static void netdrv_tx_clear (struct net_device *dev) 1266static void netdrv_tx_clear(struct net_device *dev)
1260{ 1267{
1261 int i; 1268 int i;
1262 struct netdrv_private *tp = netdev_priv(dev); 1269 struct netdrv_private *tp = netdev_priv(dev);
1263 1270
1264 atomic_set (&tp->cur_tx, 0); 1271 atomic_set(&tp->cur_tx, 0);
1265 atomic_set (&tp->dirty_tx, 0); 1272 atomic_set(&tp->dirty_tx, 0);
1266 1273
1267 /* Dump the unsent Tx packets. */ 1274 /* Dump the unsent Tx packets. */
1268 for (i = 0; i < NUM_TX_DESC; i++) { 1275 for (i = 0; i < NUM_TX_DESC; i++) {
1269 struct ring_info *rp = &tp->tx_info[i]; 1276 struct ring_info *rp = &tp->tx_info[i];
1270 if (rp->mapping != 0) { 1277 if (rp->mapping != 0) {
1271 pci_unmap_single (tp->pci_dev, rp->mapping, 1278 pci_unmap_single(tp->pci_dev, rp->mapping,
1272 rp->skb->len, PCI_DMA_TODEVICE); 1279 rp->skb->len, PCI_DMA_TODEVICE);
1273 rp->mapping = 0; 1280 rp->mapping = 0;
1274 } 1281 }
1275 if (rp->skb) { 1282 if (rp->skb) {
1276 dev_kfree_skb (rp->skb); 1283 dev_kfree_skb(rp->skb);
1277 rp->skb = NULL; 1284 rp->skb = NULL;
1278 dev->stats.tx_dropped++; 1285 dev->stats.tx_dropped++;
1279 } 1286 }
@@ -1281,7 +1288,7 @@ static void netdrv_tx_clear (struct net_device *dev)
1281} 1288}
1282 1289
1283 1290
1284static void netdrv_tx_timeout (struct net_device *dev) 1291static void netdrv_tx_timeout(struct net_device *dev)
1285{ 1292{
1286 struct netdrv_private *tp = netdev_priv(dev); 1293 struct netdrv_private *tp = netdev_priv(dev);
1287 void *ioaddr = tp->mmio_addr; 1294 void *ioaddr = tp->mmio_addr;
@@ -1289,96 +1296,95 @@ static void netdrv_tx_timeout (struct net_device *dev)
1289 u8 tmp8; 1296 u8 tmp8;
1290 unsigned long flags; 1297 unsigned long flags;
1291 1298
1292 DPRINTK ("%s: Transmit timeout, status %2.2x %4.4x " 1299 netdev_dbg(dev, "Transmit timeout, status %02x %04x media %02x\n",
1293 "media %2.2x.\n", dev->name, 1300 NETDRV_R8(ChipCmd),
1294 NETDRV_R8 (ChipCmd), 1301 NETDRV_R16(IntrStatus),
1295 NETDRV_R16 (IntrStatus), 1302 NETDRV_R8(MediaStatus));
1296 NETDRV_R8 (MediaStatus));
1297 1303
1298 /* disable Tx ASAP, if not already */ 1304 /* disable Tx ASAP, if not already */
1299 tmp8 = NETDRV_R8 (ChipCmd); 1305 tmp8 = NETDRV_R8(ChipCmd);
1300 if (tmp8 & CmdTxEnb) 1306 if (tmp8 & CmdTxEnb)
1301 NETDRV_W8 (ChipCmd, tmp8 & ~CmdTxEnb); 1307 NETDRV_W8(ChipCmd, tmp8 & ~CmdTxEnb);
1302 1308
1303 /* Disable interrupts by clearing the interrupt mask. */ 1309 /* Disable interrupts by clearing the interrupt mask. */
1304 NETDRV_W16 (IntrMask, 0x0000); 1310 NETDRV_W16(IntrMask, 0x0000);
1305 1311
1306 /* Emit info to figure out what went wrong. */ 1312 /* Emit info to figure out what went wrong. */
1307 printk (KERN_DEBUG "%s: Tx queue start entry %d dirty entry %d.\n", 1313 netdev_dbg(dev, "Tx queue start entry %d dirty entry %d\n",
1308 dev->name, atomic_read (&tp->cur_tx), 1314 atomic_read(&tp->cur_tx),
1309 atomic_read (&tp->dirty_tx)); 1315 atomic_read(&tp->dirty_tx));
1310 for (i = 0; i < NUM_TX_DESC; i++) 1316 for (i = 0; i < NUM_TX_DESC; i++)
1311 printk (KERN_DEBUG "%s: Tx descriptor %d is %8.8lx.%s\n", 1317 netdev_dbg(dev, "Tx descriptor %d is %08lx%s\n",
1312 dev->name, i, NETDRV_R32 (TxStatus0 + (i * 4)), 1318 i, NETDRV_R32(TxStatus0 + (i * 4)),
1313 i == atomic_read (&tp->dirty_tx) % NUM_TX_DESC ? 1319 i == atomic_read(&tp->dirty_tx) % NUM_TX_DESC ?
1314 " (queue head)" : ""); 1320 "(queue head)" : "");
1315 1321
1316 /* Stop a shared interrupt from scavenging while we are. */ 1322 /* Stop a shared interrupt from scavenging while we are. */
1317 spin_lock_irqsave (&tp->lock, flags); 1323 spin_lock_irqsave(&tp->lock, flags);
1318 1324
1319 netdrv_tx_clear (dev); 1325 netdrv_tx_clear(dev);
1320 1326
1321 spin_unlock_irqrestore (&tp->lock, flags); 1327 spin_unlock_irqrestore(&tp->lock, flags);
1322 1328
1323 /* ...and finally, reset everything */ 1329 /* ...and finally, reset everything */
1324 netdrv_hw_start (dev); 1330 netdrv_hw_start(dev);
1325 1331
1326 netif_wake_queue (dev); 1332 netif_wake_queue(dev);
1327} 1333}
1328 1334
1329 1335
1330 1336
1331static int netdrv_start_xmit (struct sk_buff *skb, struct net_device *dev) 1337static int netdrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1332{ 1338{
1333 struct netdrv_private *tp = netdev_priv(dev); 1339 struct netdrv_private *tp = netdev_priv(dev);
1334 void *ioaddr = tp->mmio_addr; 1340 void *ioaddr = tp->mmio_addr;
1335 int entry; 1341 int entry;
1336 1342
1337 /* Calculate the next Tx descriptor entry. */ 1343 /* Calculate the next Tx descriptor entry. */
1338 entry = atomic_read (&tp->cur_tx) % NUM_TX_DESC; 1344 entry = atomic_read(&tp->cur_tx) % NUM_TX_DESC;
1339 1345
1340 assert (tp->tx_info[entry].skb == NULL); 1346 assert(tp->tx_info[entry].skb == NULL);
1341 assert (tp->tx_info[entry].mapping == 0); 1347 assert(tp->tx_info[entry].mapping == 0);
1342 1348
1343 tp->tx_info[entry].skb = skb; 1349 tp->tx_info[entry].skb = skb;
1344 /* tp->tx_info[entry].mapping = 0; */ 1350 /* tp->tx_info[entry].mapping = 0; */
1345 skb_copy_from_linear_data(skb, tp->tx_buf[entry], skb->len); 1351 skb_copy_from_linear_data(skb, tp->tx_buf[entry], skb->len);
1346 1352
1347 /* Note: the chip doesn't have auto-pad! */ 1353 /* Note: the chip doesn't have auto-pad! */
1348 NETDRV_W32 (TxStatus0 + (entry * sizeof(u32)), 1354 NETDRV_W32(TxStatus0 + (entry * sizeof(u32)),
1349 tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); 1355 tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1350 1356
1351 dev->trans_start = jiffies; 1357 dev->trans_start = jiffies;
1352 atomic_inc (&tp->cur_tx); 1358 atomic_inc(&tp->cur_tx);
1353 if ((atomic_read (&tp->cur_tx) - atomic_read (&tp->dirty_tx)) >= NUM_TX_DESC) 1359 if ((atomic_read(&tp->cur_tx) - atomic_read(&tp->dirty_tx)) >= NUM_TX_DESC)
1354 netif_stop_queue (dev); 1360 netif_stop_queue(dev);
1355 1361
1356 DPRINTK ("%s: Queued Tx packet at %p size %u to slot %d.\n", 1362 netdev_dbg(dev, "Queued Tx packet at %p size %u to slot %d\n",
1357 dev->name, skb->data, skb->len, entry); 1363 skb->data, skb->len, entry);
1358 1364
1359 return NETDEV_TX_OK; 1365 return NETDEV_TX_OK;
1360} 1366}
1361 1367
1362 1368
1363static void netdrv_tx_interrupt (struct net_device *dev, 1369static void netdrv_tx_interrupt(struct net_device *dev,
1364 struct netdrv_private *tp, 1370 struct netdrv_private *tp,
1365 void *ioaddr) 1371 void *ioaddr)
1366{ 1372{
1367 int cur_tx, dirty_tx, tx_left; 1373 int cur_tx, dirty_tx, tx_left;
1368 1374
1369 assert (dev != NULL); 1375 assert(dev != NULL);
1370 assert (tp != NULL); 1376 assert(tp != NULL);
1371 assert (ioaddr != NULL); 1377 assert(ioaddr != NULL);
1372 1378
1373 dirty_tx = atomic_read (&tp->dirty_tx); 1379 dirty_tx = atomic_read(&tp->dirty_tx);
1374 1380
1375 cur_tx = atomic_read (&tp->cur_tx); 1381 cur_tx = atomic_read(&tp->cur_tx);
1376 tx_left = cur_tx - dirty_tx; 1382 tx_left = cur_tx - dirty_tx;
1377 while (tx_left > 0) { 1383 while (tx_left > 0) {
1378 int entry = dirty_tx % NUM_TX_DESC; 1384 int entry = dirty_tx % NUM_TX_DESC;
1379 int txstatus; 1385 int txstatus;
1380 1386
1381 txstatus = NETDRV_R32 (TxStatus0 + (entry * sizeof (u32))); 1387 txstatus = NETDRV_R32(TxStatus0 + (entry * sizeof(u32)));
1382 1388
1383 if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted))) 1389 if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted)))
1384 break; /* It still hasn't been Txed */ 1390 break; /* It still hasn't been Txed */
@@ -1386,12 +1392,12 @@ static void netdrv_tx_interrupt (struct net_device *dev,
1386 /* Note: TxCarrierLost is always asserted at 100mbps. */ 1392 /* Note: TxCarrierLost is always asserted at 100mbps. */
1387 if (txstatus & (TxOutOfWindow | TxAborted)) { 1393 if (txstatus & (TxOutOfWindow | TxAborted)) {
1388 /* There was an major error, log it. */ 1394 /* There was an major error, log it. */
1389 DPRINTK ("%s: Transmit error, Tx status %8.8x.\n", 1395 netdev_dbg(dev, "Transmit error, Tx status %#08x\n",
1390 dev->name, txstatus); 1396 txstatus);
1391 dev->stats.tx_errors++; 1397 dev->stats.tx_errors++;
1392 if (txstatus & TxAborted) { 1398 if (txstatus & TxAborted) {
1393 dev->stats.tx_aborted_errors++; 1399 dev->stats.tx_aborted_errors++;
1394 NETDRV_W32 (TxConfig, TxClearAbt | (TX_DMA_BURST << TxDMAShift)); 1400 NETDRV_W32(TxConfig, TxClearAbt | (TX_DMA_BURST << TxDMAShift));
1395 } 1401 }
1396 if (txstatus & TxCarrierLost) 1402 if (txstatus & TxCarrierLost)
1397 dev->stats.tx_carrier_errors++; 1403 dev->stats.tx_carrier_errors++;
@@ -1417,48 +1423,45 @@ static void netdrv_tx_interrupt (struct net_device *dev,
1417 PCI_DMA_TODEVICE); 1423 PCI_DMA_TODEVICE);
1418 tp->tx_info[entry].mapping = 0; 1424 tp->tx_info[entry].mapping = 0;
1419 } 1425 }
1420 dev_kfree_skb_irq (tp->tx_info[entry].skb); 1426 dev_kfree_skb_irq(tp->tx_info[entry].skb);
1421 tp->tx_info[entry].skb = NULL; 1427 tp->tx_info[entry].skb = NULL;
1422 dirty_tx++; 1428 dirty_tx++;
1423 if (dirty_tx < 0) { /* handle signed int overflow */ 1429 if (dirty_tx < 0) { /* handle signed int overflow */
1424 atomic_sub (cur_tx, &tp->cur_tx); /* XXX racy? */ 1430 atomic_sub(cur_tx, &tp->cur_tx); /* XXX racy? */
1425 dirty_tx = cur_tx - tx_left + 1; 1431 dirty_tx = cur_tx - tx_left + 1;
1426 } 1432 }
1427 if (netif_queue_stopped (dev)) 1433 if (netif_queue_stopped(dev))
1428 netif_wake_queue (dev); 1434 netif_wake_queue(dev);
1429 1435
1430 cur_tx = atomic_read (&tp->cur_tx); 1436 cur_tx = atomic_read(&tp->cur_tx);
1431 tx_left = cur_tx - dirty_tx; 1437 tx_left = cur_tx - dirty_tx;
1432 1438
1433 } 1439 }
1434 1440
1435#ifndef NETDRV_NDEBUG 1441#ifndef NETDRV_NDEBUG
1436 if (atomic_read (&tp->cur_tx) - dirty_tx > NUM_TX_DESC) { 1442 if (atomic_read(&tp->cur_tx) - dirty_tx > NUM_TX_DESC) {
1437 printk (KERN_ERR 1443 netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d\n",
1438 "%s: Out-of-sync dirty pointer, %d vs. %d.\n", 1444 dirty_tx, atomic_read(&tp->cur_tx));
1439 dev->name, dirty_tx, atomic_read (&tp->cur_tx));
1440 dirty_tx += NUM_TX_DESC; 1445 dirty_tx += NUM_TX_DESC;
1441 } 1446 }
1442#endif /* NETDRV_NDEBUG */ 1447#endif /* NETDRV_NDEBUG */
1443 1448
1444 atomic_set (&tp->dirty_tx, dirty_tx); 1449 atomic_set(&tp->dirty_tx, dirty_tx);
1445} 1450}
1446 1451
1447 1452
1448/* TODO: clean this up! Rx reset need not be this intensive */ 1453/* TODO: clean this up! Rx reset need not be this intensive */
1449static void netdrv_rx_err (u32 rx_status, struct net_device *dev, 1454static void netdrv_rx_err(u32 rx_status, struct net_device *dev,
1450 struct netdrv_private *tp, void *ioaddr) 1455 struct netdrv_private *tp, void *ioaddr)
1451{ 1456{
1452 u8 tmp8; 1457 u8 tmp8;
1453 int tmp_work = 1000; 1458 int tmp_work = 1000;
1454 1459
1455 DPRINTK ("%s: Ethernet frame had errors, status %8.8x.\n", 1460 netdev_dbg(dev, "Ethernet frame had errors, status %08x\n", rx_status);
1456 dev->name, rx_status); 1461 if (rx_status & RxTooLong)
1457 if (rx_status & RxTooLong) { 1462 netdev_dbg(dev, "Oversized Ethernet frame, status %04x!\n",
1458 DPRINTK ("%s: Oversized Ethernet frame, status %4.4x!\n", 1463 rx_status);
1459 dev->name, rx_status);
1460 /* A.C.: The chip hangs here. */ 1464 /* A.C.: The chip hangs here. */
1461 }
1462 dev->stats.rx_errors++; 1465 dev->stats.rx_errors++;
1463 if (rx_status & (RxBadSymbol | RxBadAlign)) 1466 if (rx_status & (RxBadSymbol | RxBadAlign))
1464 dev->stats.rx_frame_errors++; 1467 dev->stats.rx_frame_errors++;
@@ -1466,56 +1469,55 @@ static void netdrv_rx_err (u32 rx_status, struct net_device *dev,
1466 dev->stats.rx_length_errors++; 1469 dev->stats.rx_length_errors++;
1467 if (rx_status & RxCRCErr) 1470 if (rx_status & RxCRCErr)
1468 dev->stats.rx_crc_errors++; 1471 dev->stats.rx_crc_errors++;
1469 /* Reset the receiver, based on RealTek recommendation. (Bug?) */ 1472 /* Reset the receiver, based on RealTek recommendation.(Bug?) */
1470 tp->cur_rx = 0; 1473 tp->cur_rx = 0;
1471 1474
1472 /* disable receive */ 1475 /* disable receive */
1473 tmp8 = NETDRV_R8 (ChipCmd) & ChipCmdClear; 1476 tmp8 = NETDRV_R8(ChipCmd) & ChipCmdClear;
1474 NETDRV_W8_F (ChipCmd, tmp8 | CmdTxEnb); 1477 NETDRV_W8_F(ChipCmd, tmp8 | CmdTxEnb);
1475 1478
1476 /* A.C.: Reset the multicast list. */ 1479 /* A.C.: Reset the multicast list. */
1477 netdrv_set_rx_mode (dev); 1480 netdrv_set_rx_mode(dev);
1478 1481
1479 /* XXX potentially temporary hack to 1482 /* XXX potentially temporary hack to
1480 * restart hung receiver */ 1483 * restart hung receiver */
1481 while (--tmp_work > 0) { 1484 while (--tmp_work > 0) {
1482 tmp8 = NETDRV_R8 (ChipCmd); 1485 tmp8 = NETDRV_R8(ChipCmd);
1483 if ((tmp8 & CmdRxEnb) && (tmp8 & CmdTxEnb)) 1486 if ((tmp8 & CmdRxEnb) && (tmp8 & CmdTxEnb))
1484 break; 1487 break;
1485 NETDRV_W8_F (ChipCmd, 1488 NETDRV_W8_F(ChipCmd,
1486 (tmp8 & ChipCmdClear) | CmdRxEnb | CmdTxEnb); 1489 (tmp8 & ChipCmdClear) | CmdRxEnb | CmdTxEnb);
1487 } 1490 }
1488 1491
1489 /* G.S.: Re-enable receiver */ 1492 /* G.S.: Re-enable receiver */
1490 /* XXX temporary hack to work around receiver hang */ 1493 /* XXX temporary hack to work around receiver hang */
1491 netdrv_set_rx_mode (dev); 1494 netdrv_set_rx_mode(dev);
1492 1495
1493 if (tmp_work <= 0) 1496 if (tmp_work <= 0)
1494 printk (KERN_WARNING PFX "tx/rx enable wait too long\n"); 1497 netdev_warn(dev, "tx/rx enable wait too long\n");
1495} 1498}
1496 1499
1497 1500
1498/* The data sheet doesn't describe the Rx ring at all, so I'm guessing at the 1501/* The data sheet doesn't describe the Rx ring at all, so I'm guessing at the
1499 field alignments and semantics. */ 1502 field alignments and semantics. */
1500static void netdrv_rx_interrupt (struct net_device *dev, 1503static void netdrv_rx_interrupt(struct net_device *dev,
1501 struct netdrv_private *tp, void *ioaddr) 1504 struct netdrv_private *tp, void *ioaddr)
1502{ 1505{
1503 unsigned char *rx_ring; 1506 unsigned char *rx_ring;
1504 u16 cur_rx; 1507 u16 cur_rx;
1505 1508
1506 assert (dev != NULL); 1509 assert(dev != NULL);
1507 assert (tp != NULL); 1510 assert(tp != NULL);
1508 assert (ioaddr != NULL); 1511 assert(ioaddr != NULL);
1509 1512
1510 rx_ring = tp->rx_ring; 1513 rx_ring = tp->rx_ring;
1511 cur_rx = tp->cur_rx; 1514 cur_rx = tp->cur_rx;
1512 1515
1513 DPRINTK ("%s: In netdrv_rx(), current %4.4x BufAddr %4.4x," 1516 netdev_dbg(dev, "In netdrv_rx(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
1514 " free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx, 1517 cur_rx, NETDRV_R16(RxBufAddr),
1515 NETDRV_R16 (RxBufAddr), 1518 NETDRV_R16(RxBufPtr), NETDRV_R8(ChipCmd));
1516 NETDRV_R16 (RxBufPtr), NETDRV_R8 (ChipCmd));
1517 1519
1518 while ((NETDRV_R8 (ChipCmd) & RxBufEmpty) == 0) { 1520 while ((NETDRV_R8(ChipCmd) & RxBufEmpty) == 0) {
1519 int ring_offset = cur_rx % RX_BUF_LEN; 1521 int ring_offset = cur_rx % RX_BUF_LEN;
1520 u32 rx_status; 1522 u32 rx_status;
1521 unsigned int rx_size; 1523 unsigned int rx_size;
@@ -1523,32 +1525,25 @@ static void netdrv_rx_interrupt (struct net_device *dev,
1523 struct sk_buff *skb; 1525 struct sk_buff *skb;
1524 1526
1525 /* read size+status of next frame from DMA ring buffer */ 1527 /* read size+status of next frame from DMA ring buffer */
1526 rx_status = le32_to_cpu (*(u32 *) (rx_ring + ring_offset)); 1528 rx_status = le32_to_cpu(*(u32 *)(rx_ring + ring_offset));
1527 rx_size = rx_status >> 16; 1529 rx_size = rx_status >> 16;
1528 pkt_size = rx_size - 4; 1530 pkt_size = rx_size - 4;
1529 1531
1530 DPRINTK ("%s: netdrv_rx() status %4.4x, size %4.4x," 1532 netdev_dbg(dev, "netdrv_rx() status %04x, size %04x, cur %04x\n",
1531 " cur %4.4x.\n", dev->name, rx_status, 1533 rx_status, rx_size, cur_rx);
1532 rx_size, cur_rx);
1533#if defined(NETDRV_DEBUG) && (NETDRV_DEBUG > 2) 1534#if defined(NETDRV_DEBUG) && (NETDRV_DEBUG > 2)
1534 { 1535 print_hex_dump_bytes("Frame contents: ", HEX_DUMP_OFFSET,
1535 int i; 1536 &rx_ring[ring_offset], 70);
1536 DPRINTK ("%s: Frame contents ", dev->name);
1537 for (i = 0; i < 70; i++)
1538 printk (" %2.2x",
1539 rx_ring[ring_offset + i]);
1540 printk (".\n");
1541 }
1542#endif 1537#endif
1543 1538
1544 /* If Rx err or invalid rx_size/rx_status received 1539 /* If Rx err or invalid rx_size/rx_status received
1545 * (which happens if we get lost in the ring), 1540 *(which happens if we get lost in the ring),
1546 * Rx process gets reset, so we abort any further 1541 * Rx process gets reset, so we abort any further
1547 * Rx processing. 1542 * Rx processing.
1548 */ 1543 */
1549 if ((rx_size > (MAX_ETH_FRAME_SIZE+4)) || 1544 if ((rx_size > (MAX_ETH_FRAME_SIZE+4)) ||
1550 (!(rx_status & RxStatusOK))) { 1545 (!(rx_status & RxStatusOK))) {
1551 netdrv_rx_err (rx_status, dev, tp, ioaddr); 1546 netdrv_rx_err(rx_status, dev, tp, ioaddr);
1552 return; 1547 return;
1553 } 1548 }
1554 1549
@@ -1561,71 +1556,67 @@ static void netdrv_rx_interrupt (struct net_device *dev,
1561 * drop packets here under memory pressure. 1556 * drop packets here under memory pressure.
1562 */ 1557 */
1563 1558
1564 skb = dev_alloc_skb (pkt_size + 2); 1559 skb = dev_alloc_skb(pkt_size + 2);
1565 if (skb) { 1560 if (skb) {
1566 skb_reserve (skb, 2); /* 16 byte align the IP fields. */ 1561 skb_reserve(skb, 2); /* 16 byte align the IP fields. */
1567 1562
1568 skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size); 1563 skb_copy_to_linear_data(skb, &rx_ring[ring_offset + 4], pkt_size);
1569 skb_put (skb, pkt_size); 1564 skb_put(skb, pkt_size);
1570 1565
1571 skb->protocol = eth_type_trans (skb, dev); 1566 skb->protocol = eth_type_trans(skb, dev);
1572 netif_rx (skb); 1567 netif_rx(skb);
1573 dev->stats.rx_bytes += pkt_size; 1568 dev->stats.rx_bytes += pkt_size;
1574 dev->stats.rx_packets++; 1569 dev->stats.rx_packets++;
1575 } else { 1570 } else {
1576 printk (KERN_WARNING 1571 netdev_warn(dev, "Memory squeeze, dropping packet\n");
1577 "%s: Memory squeeze, dropping packet.\n",
1578 dev->name);
1579 dev->stats.rx_dropped++; 1572 dev->stats.rx_dropped++;
1580 } 1573 }
1581 1574
1582 cur_rx = (cur_rx + rx_size + 4 + 3) & ~3; 1575 cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
1583 NETDRV_W16_F (RxBufPtr, cur_rx - 16); 1576 NETDRV_W16_F(RxBufPtr, cur_rx - 16);
1584 } 1577 }
1585 1578
1586 DPRINTK ("%s: Done netdrv_rx(), current %4.4x BufAddr %4.4x," 1579 netdev_dbg(dev, "Done netdrv_rx(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
1587 " free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx, 1580 cur_rx, NETDRV_R16(RxBufAddr),
1588 NETDRV_R16 (RxBufAddr), 1581 NETDRV_R16(RxBufPtr), NETDRV_R8(ChipCmd));
1589 NETDRV_R16 (RxBufPtr), NETDRV_R8 (ChipCmd));
1590 1582
1591 tp->cur_rx = cur_rx; 1583 tp->cur_rx = cur_rx;
1592} 1584}
1593 1585
1594 1586
1595static void netdrv_weird_interrupt (struct net_device *dev, 1587static void netdrv_weird_interrupt(struct net_device *dev,
1596 struct netdrv_private *tp, 1588 struct netdrv_private *tp,
1597 void *ioaddr, 1589 void *ioaddr,
1598 int status, int link_changed) 1590 int status, int link_changed)
1599{ 1591{
1600 printk (KERN_DEBUG "%s: Abnormal interrupt, status %8.8x.\n", 1592 netdev_printk(KERN_DEBUG, dev, "Abnormal interrupt, status %08x\n",
1601 dev->name, status); 1593 status);
1602 1594
1603 assert (dev != NULL); 1595 assert(dev != NULL);
1604 assert (tp != NULL); 1596 assert(tp != NULL);
1605 assert (ioaddr != NULL); 1597 assert(ioaddr != NULL);
1606 1598
1607 /* Update the error count. */ 1599 /* Update the error count. */
1608 dev->stats.rx_missed_errors += NETDRV_R32 (RxMissed); 1600 dev->stats.rx_missed_errors += NETDRV_R32(RxMissed);
1609 NETDRV_W32 (RxMissed, 0); 1601 NETDRV_W32(RxMissed, 0);
1610 1602
1611 if ((status & RxUnderrun) && link_changed && 1603 if ((status & RxUnderrun) && link_changed &&
1612 (tp->drv_flags & HAS_LNK_CHNG)) { 1604 (tp->drv_flags & HAS_LNK_CHNG)) {
1613 /* Really link-change on new chips. */ 1605 /* Really link-change on new chips. */
1614 int lpar = NETDRV_R16 (NWayLPAR); 1606 int lpar = NETDRV_R16(NWayLPAR);
1615 int duplex = ((lpar & 0x0100) || (lpar & 0x01C0) == 0x0040 || 1607 int duplex = ((lpar & 0x0100) || (lpar & 0x01C0) == 0x0040 ||
1616 tp->duplex_lock); 1608 tp->duplex_lock);
1617 if (tp->full_duplex != duplex) { 1609 if (tp->full_duplex != duplex) {
1618 tp->full_duplex = duplex; 1610 tp->full_duplex = duplex;
1619 NETDRV_W8 (Cfg9346, Cfg9346_Unlock); 1611 NETDRV_W8(Cfg9346, Cfg9346_Unlock);
1620 NETDRV_W8 (Config1, tp->full_duplex ? 0x60 : 0x20); 1612 NETDRV_W8(Config1, tp->full_duplex ? 0x60 : 0x20);
1621 NETDRV_W8 (Cfg9346, Cfg9346_Lock); 1613 NETDRV_W8(Cfg9346, Cfg9346_Lock);
1622 } 1614 }
1623 status &= ~RxUnderrun; 1615 status &= ~RxUnderrun;
1624 } 1616 }
1625 1617
1626 /* XXX along with netdrv_rx_err, are we double-counting errors? */ 1618 /* XXX along with netdrv_rx_err, are we double-counting errors? */
1627 if (status & 1619 if (status & (RxUnderrun | RxOverflow | RxErr | RxFIFOOver))
1628 (RxUnderrun | RxOverflow | RxErr | RxFIFOOver))
1629 dev->stats.rx_errors++; 1620 dev->stats.rx_errors++;
1630 1621
1631 if (status & (PCSTimeout)) 1622 if (status & (PCSTimeout))
@@ -1634,22 +1625,21 @@ static void netdrv_weird_interrupt (struct net_device *dev,
1634 dev->stats.rx_fifo_errors++; 1625 dev->stats.rx_fifo_errors++;
1635 if (status & RxOverflow) { 1626 if (status & RxOverflow) {
1636 dev->stats.rx_over_errors++; 1627 dev->stats.rx_over_errors++;
1637 tp->cur_rx = NETDRV_R16 (RxBufAddr) % RX_BUF_LEN; 1628 tp->cur_rx = NETDRV_R16(RxBufAddr) % RX_BUF_LEN;
1638 NETDRV_W16_F (RxBufPtr, tp->cur_rx - 16); 1629 NETDRV_W16_F(RxBufPtr, tp->cur_rx - 16);
1639 } 1630 }
1640 if (status & PCIErr) { 1631 if (status & PCIErr) {
1641 u16 pci_cmd_status; 1632 u16 pci_cmd_status;
1642 pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status); 1633 pci_read_config_word(tp->pci_dev, PCI_STATUS, &pci_cmd_status);
1643 1634
1644 printk (KERN_ERR "%s: PCI Bus error %4.4x.\n", 1635 netdev_err(dev, "PCI Bus error %04x\n", pci_cmd_status);
1645 dev->name, pci_cmd_status);
1646 } 1636 }
1647} 1637}
1648 1638
1649 1639
1650/* The interrupt handler does all of the Rx thread work and cleans up 1640/* The interrupt handler does all of the Rx thread work and cleans up
1651 after the Tx thread. */ 1641 after the Tx thread. */
1652static irqreturn_t netdrv_interrupt (int irq, void *dev_instance) 1642static irqreturn_t netdrv_interrupt(int irq, void *dev_instance)
1653{ 1643{
1654 struct net_device *dev = (struct net_device *) dev_instance; 1644 struct net_device *dev = (struct net_device *) dev_instance;
1655 struct netdrv_private *tp = netdev_priv(dev); 1645 struct netdrv_private *tp = netdev_priv(dev);
@@ -1658,22 +1648,21 @@ static irqreturn_t netdrv_interrupt (int irq, void *dev_instance)
1658 int status = 0, link_changed = 0; /* avoid bogus "uninit" warning */ 1648 int status = 0, link_changed = 0; /* avoid bogus "uninit" warning */
1659 int handled = 0; 1649 int handled = 0;
1660 1650
1661 spin_lock (&tp->lock); 1651 spin_lock(&tp->lock);
1662 1652
1663 do { 1653 do {
1664 status = NETDRV_R16 (IntrStatus); 1654 status = NETDRV_R16(IntrStatus);
1665 1655
1666 /* h/w no longer present (hotplug?) or major error, bail */ 1656 /* h/w no longer present(hotplug?) or major error, bail */
1667 if (status == 0xFFFF) 1657 if (status == 0xFFFF)
1668 break; 1658 break;
1669 1659
1670 handled = 1; 1660 handled = 1;
1671 /* Acknowledge all of the current interrupt sources ASAP */ 1661 /* Acknowledge all of the current interrupt sources ASAP */
1672 NETDRV_W16_F (IntrStatus, status); 1662 NETDRV_W16_F(IntrStatus, status);
1673 1663
1674 DPRINTK ("%s: interrupt status=%#4.4x new intstat=%#4.4x.\n", 1664 netdev_dbg(dev, "interrupt status=%#04x new intstat=%#04x\n",
1675 dev->name, status, 1665 status, NETDRV_R16(IntrStatus));
1676 NETDRV_R16 (IntrStatus));
1677 1666
1678 if ((status & 1667 if ((status &
1679 (PCIErr | PCSTimeout | RxUnderrun | RxOverflow | 1668 (PCIErr | PCSTimeout | RxUnderrun | RxOverflow |
@@ -1682,69 +1671,67 @@ static irqreturn_t netdrv_interrupt (int irq, void *dev_instance)
1682 1671
1683 /* Check uncommon events with one test. */ 1672 /* Check uncommon events with one test. */
1684 if (status & (PCIErr | PCSTimeout | RxUnderrun | RxOverflow | 1673 if (status & (PCIErr | PCSTimeout | RxUnderrun | RxOverflow |
1685 RxFIFOOver | TxErr | RxErr)) 1674 RxFIFOOver | TxErr | RxErr))
1686 netdrv_weird_interrupt (dev, tp, ioaddr, 1675 netdrv_weird_interrupt(dev, tp, ioaddr,
1687 status, link_changed); 1676 status, link_changed);
1688 1677
1689 if (status & (RxOK | RxUnderrun | RxOverflow | RxFIFOOver)) /* Rx interrupt */ 1678 if (status & (RxOK | RxUnderrun | RxOverflow | RxFIFOOver)) /* Rx interrupt */
1690 netdrv_rx_interrupt (dev, tp, ioaddr); 1679 netdrv_rx_interrupt(dev, tp, ioaddr);
1691 1680
1692 if (status & (TxOK | TxErr)) 1681 if (status & (TxOK | TxErr))
1693 netdrv_tx_interrupt (dev, tp, ioaddr); 1682 netdrv_tx_interrupt(dev, tp, ioaddr);
1694 1683
1695 boguscnt--; 1684 boguscnt--;
1696 } while (boguscnt > 0); 1685 } while (boguscnt > 0);
1697 1686
1698 if (boguscnt <= 0) { 1687 if (boguscnt <= 0) {
1699 printk (KERN_WARNING 1688 netdev_warn(dev, "Too much work at interrupt, IntrStatus=%#04x\n",
1700 "%s: Too much work at interrupt, " 1689 status);
1701 "IntrStatus=0x%4.4x.\n", dev->name,
1702 status);
1703 1690
1704 /* Clear all interrupt sources. */ 1691 /* Clear all interrupt sources. */
1705 NETDRV_W16 (IntrStatus, 0xffff); 1692 NETDRV_W16(IntrStatus, 0xffff);
1706 } 1693 }
1707 1694
1708 spin_unlock (&tp->lock); 1695 spin_unlock(&tp->lock);
1709 1696
1710 DPRINTK ("%s: exiting interrupt, intr_status=%#4.4x.\n", 1697 netdev_dbg(dev, "exiting interrupt, intr_status=%#04x\n",
1711 dev->name, NETDRV_R16 (IntrStatus)); 1698 NETDRV_R16(IntrStatus));
1712 return IRQ_RETVAL(handled); 1699 return IRQ_RETVAL(handled);
1713} 1700}
1714 1701
1715 1702
1716static int netdrv_close (struct net_device *dev) 1703static int netdrv_close(struct net_device *dev)
1717{ 1704{
1718 struct netdrv_private *tp = netdev_priv(dev); 1705 struct netdrv_private *tp = netdev_priv(dev);
1719 void *ioaddr = tp->mmio_addr; 1706 void *ioaddr = tp->mmio_addr;
1720 unsigned long flags; 1707 unsigned long flags;
1721 1708
1722 DPRINTK ("ENTER\n"); 1709 DPRINTK("ENTER\n");
1723 1710
1724 netif_stop_queue (dev); 1711 netif_stop_queue(dev);
1725 1712
1726 DPRINTK ("%s: Shutting down ethercard, status was 0x%4.4x.\n", 1713 netdev_dbg(dev, "Shutting down ethercard, status was %#04x\n",
1727 dev->name, NETDRV_R16 (IntrStatus)); 1714 NETDRV_R16(IntrStatus));
1728 1715
1729 del_timer_sync (&tp->timer); 1716 del_timer_sync(&tp->timer);
1730 1717
1731 spin_lock_irqsave (&tp->lock, flags); 1718 spin_lock_irqsave(&tp->lock, flags);
1732 1719
1733 /* Stop the chip's Tx and Rx DMA processes. */ 1720 /* Stop the chip's Tx and Rx DMA processes. */
1734 NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear)); 1721 NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear));
1735 1722
1736 /* Disable interrupts by clearing the interrupt mask. */ 1723 /* Disable interrupts by clearing the interrupt mask. */
1737 NETDRV_W16 (IntrMask, 0x0000); 1724 NETDRV_W16(IntrMask, 0x0000);
1738 1725
1739 /* Update the error counts. */ 1726 /* Update the error counts. */
1740 dev->stats.rx_missed_errors += NETDRV_R32 (RxMissed); 1727 dev->stats.rx_missed_errors += NETDRV_R32(RxMissed);
1741 NETDRV_W32 (RxMissed, 0); 1728 NETDRV_W32(RxMissed, 0);
1742 1729
1743 spin_unlock_irqrestore (&tp->lock, flags); 1730 spin_unlock_irqrestore(&tp->lock, flags);
1744 1731
1745 free_irq (dev->irq, dev); 1732 free_irq(dev->irq, dev);
1746 1733
1747 netdrv_tx_clear (dev); 1734 netdrv_tx_clear(dev);
1748 1735
1749 pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN, 1736 pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
1750 tp->rx_ring, tp->rx_ring_dma); 1737 tp->rx_ring, tp->rx_ring_dma);
@@ -1754,23 +1741,23 @@ static int netdrv_close (struct net_device *dev)
1754 tp->tx_bufs = NULL; 1741 tp->tx_bufs = NULL;
1755 1742
1756 /* Green! Put the chip in low-power mode. */ 1743 /* Green! Put the chip in low-power mode. */
1757 NETDRV_W8 (Cfg9346, Cfg9346_Unlock); 1744 NETDRV_W8(Cfg9346, Cfg9346_Unlock);
1758 NETDRV_W8 (Config1, 0x03); 1745 NETDRV_W8(Config1, 0x03);
1759 NETDRV_W8 (Cfg9346, Cfg9346_Lock); 1746 NETDRV_W8(Cfg9346, Cfg9346_Lock);
1760 1747
1761 DPRINTK ("EXIT\n"); 1748 DPRINTK("EXIT\n");
1762 return 0; 1749 return 0;
1763} 1750}
1764 1751
1765 1752
1766static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) 1753static int netdrv_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1767{ 1754{
1768 struct netdrv_private *tp = netdev_priv(dev); 1755 struct netdrv_private *tp = netdev_priv(dev);
1769 struct mii_ioctl_data *data = if_mii(rq); 1756 struct mii_ioctl_data *data = if_mii(rq);
1770 unsigned long flags; 1757 unsigned long flags;
1771 int rc = 0; 1758 int rc = 0;
1772 1759
1773 DPRINTK ("ENTER\n"); 1760 DPRINTK("ENTER\n");
1774 1761
1775 switch (cmd) { 1762 switch (cmd) {
1776 case SIOCGMIIPHY: /* Get address of MII PHY in use. */ 1763 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
@@ -1778,15 +1765,15 @@ static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1778 /* Fall Through */ 1765 /* Fall Through */
1779 1766
1780 case SIOCGMIIREG: /* Read MII PHY register. */ 1767 case SIOCGMIIREG: /* Read MII PHY register. */
1781 spin_lock_irqsave (&tp->lock, flags); 1768 spin_lock_irqsave(&tp->lock, flags);
1782 data->val_out = mdio_read (dev, data->phy_id & 0x1f, data->reg_num & 0x1f); 1769 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
1783 spin_unlock_irqrestore (&tp->lock, flags); 1770 spin_unlock_irqrestore(&tp->lock, flags);
1784 break; 1771 break;
1785 1772
1786 case SIOCSMIIREG: /* Write MII PHY register. */ 1773 case SIOCSMIIREG: /* Write MII PHY register. */
1787 spin_lock_irqsave (&tp->lock, flags); 1774 spin_lock_irqsave(&tp->lock, flags);
1788 mdio_write (dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in); 1775 mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1789 spin_unlock_irqrestore (&tp->lock, flags); 1776 spin_unlock_irqrestore(&tp->lock, flags);
1790 break; 1777 break;
1791 1778
1792 default: 1779 default:
@@ -1794,43 +1781,43 @@ static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1794 break; 1781 break;
1795 } 1782 }
1796 1783
1797 DPRINTK ("EXIT, returning %d\n", rc); 1784 DPRINTK("EXIT, returning %d\n", rc);
1798 return rc; 1785 return rc;
1799} 1786}
1800 1787
1801/* Set or clear the multicast filter for this adaptor. 1788/* Set or clear the multicast filter for this adaptor.
1802 This routine is not state sensitive and need not be SMP locked. */ 1789 This routine is not state sensitive and need not be SMP locked. */
1803 1790
1804static void netdrv_set_rx_mode (struct net_device *dev) 1791static void netdrv_set_rx_mode(struct net_device *dev)
1805{ 1792{
1806 struct netdrv_private *tp = netdev_priv(dev); 1793 struct netdrv_private *tp = netdev_priv(dev);
1807 void *ioaddr = tp->mmio_addr; 1794 void *ioaddr = tp->mmio_addr;
1808 u32 mc_filter[2]; /* Multicast hash filter */ 1795 u32 mc_filter[2]; /* Multicast hash filter */
1809 int i, rx_mode; 1796 int rx_mode;
1810 u32 tmp; 1797 u32 tmp;
1811 1798
1812 DPRINTK ("ENTER\n"); 1799 DPRINTK("ENTER\n");
1813 1800
1814 DPRINTK ("%s: netdrv_set_rx_mode(%4.4x) done -- Rx config %8.8x.\n", 1801 netdev_dbg(dev, "%s(%04x) done -- Rx config %08lx\n",
1815 dev->name, dev->flags, NETDRV_R32 (RxConfig)); 1802 __func__, dev->flags, NETDRV_R32(RxConfig));
1816 1803
1817 /* Note: do not reorder, GCC is clever about common statements. */ 1804 /* Note: do not reorder, GCC is clever about common statements. */
1818 if (dev->flags & IFF_PROMISC) { 1805 if (dev->flags & IFF_PROMISC) {
1819 rx_mode = 1806 rx_mode =
1820 AcceptBroadcast | AcceptMulticast | AcceptMyPhys | 1807 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
1821 AcceptAllPhys; 1808 AcceptAllPhys;
1822 mc_filter[1] = mc_filter[0] = 0xffffffff; 1809 mc_filter[1] = mc_filter[0] = 0xffffffff;
1823 } else if ((dev->mc_count > multicast_filter_limit) || 1810 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1824 (dev->flags & IFF_ALLMULTI)) { 1811 (dev->flags & IFF_ALLMULTI)) {
1825 /* Too many to filter perfectly -- accept all multicasts. */ 1812 /* Too many to filter perfectly -- accept all multicasts. */
1826 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 1813 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1827 mc_filter[1] = mc_filter[0] = 0xffffffff; 1814 mc_filter[1] = mc_filter[0] = 0xffffffff;
1828 } else { 1815 } else {
1829 struct dev_mc_list *mclist; 1816 struct dev_mc_list *mclist;
1817
1830 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 1818 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1831 mc_filter[1] = mc_filter[0] = 0; 1819 mc_filter[1] = mc_filter[0] = 0;
1832 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 1820 netdev_for_each_mc_addr(mclist, dev) {
1833 i++, mclist = mclist->next) {
1834 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 1821 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1835 1822
1836 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 1823 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
@@ -1838,66 +1825,66 @@ static void netdrv_set_rx_mode (struct net_device *dev)
1838 } 1825 }
1839 1826
1840 /* if called from irq handler, lock already acquired */ 1827 /* if called from irq handler, lock already acquired */
1841 if (!in_irq ()) 1828 if (!in_irq())
1842 spin_lock_irq (&tp->lock); 1829 spin_lock_irq(&tp->lock);
1843 1830
1844 /* We can safely update without stopping the chip. */ 1831 /* We can safely update without stopping the chip. */
1845 tmp = netdrv_rx_config | rx_mode | 1832 tmp = netdrv_rx_config | rx_mode |
1846 (NETDRV_R32 (RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask); 1833 (NETDRV_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
1847 NETDRV_W32_F (RxConfig, tmp); 1834 NETDRV_W32_F(RxConfig, tmp);
1848 NETDRV_W32_F (MAR0 + 0, mc_filter[0]); 1835 NETDRV_W32_F(MAR0 + 0, mc_filter[0]);
1849 NETDRV_W32_F (MAR0 + 4, mc_filter[1]); 1836 NETDRV_W32_F(MAR0 + 4, mc_filter[1]);
1850 1837
1851 if (!in_irq ()) 1838 if (!in_irq())
1852 spin_unlock_irq (&tp->lock); 1839 spin_unlock_irq(&tp->lock);
1853 1840
1854 DPRINTK ("EXIT\n"); 1841 DPRINTK("EXIT\n");
1855} 1842}
1856 1843
1857 1844
1858#ifdef CONFIG_PM 1845#ifdef CONFIG_PM
1859 1846
1860static int netdrv_suspend (struct pci_dev *pdev, pm_message_t state) 1847static int netdrv_suspend(struct pci_dev *pdev, pm_message_t state)
1861{ 1848{
1862 struct net_device *dev = pci_get_drvdata (pdev); 1849 struct net_device *dev = pci_get_drvdata(pdev);
1863 struct netdrv_private *tp = netdev_priv(dev); 1850 struct netdrv_private *tp = netdev_priv(dev);
1864 void *ioaddr = tp->mmio_addr; 1851 void *ioaddr = tp->mmio_addr;
1865 unsigned long flags; 1852 unsigned long flags;
1866 1853
1867 if (!netif_running(dev)) 1854 if (!netif_running(dev))
1868 return 0; 1855 return 0;
1869 netif_device_detach (dev); 1856 netif_device_detach(dev);
1870 1857
1871 spin_lock_irqsave (&tp->lock, flags); 1858 spin_lock_irqsave(&tp->lock, flags);
1872 1859
1873 /* Disable interrupts, stop Tx and Rx. */ 1860 /* Disable interrupts, stop Tx and Rx. */
1874 NETDRV_W16 (IntrMask, 0x0000); 1861 NETDRV_W16(IntrMask, 0x0000);
1875 NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear)); 1862 NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear));
1876 1863
1877 /* Update the error counts. */ 1864 /* Update the error counts. */
1878 dev->stats.rx_missed_errors += NETDRV_R32 (RxMissed); 1865 dev->stats.rx_missed_errors += NETDRV_R32(RxMissed);
1879 NETDRV_W32 (RxMissed, 0); 1866 NETDRV_W32(RxMissed, 0);
1880 1867
1881 spin_unlock_irqrestore (&tp->lock, flags); 1868 spin_unlock_irqrestore(&tp->lock, flags);
1882 1869
1883 pci_save_state (pdev); 1870 pci_save_state(pdev);
1884 pci_set_power_state (pdev, PCI_D3hot); 1871 pci_set_power_state(pdev, PCI_D3hot);
1885 1872
1886 return 0; 1873 return 0;
1887} 1874}
1888 1875
1889 1876
1890static int netdrv_resume (struct pci_dev *pdev) 1877static int netdrv_resume(struct pci_dev *pdev)
1891{ 1878{
1892 struct net_device *dev = pci_get_drvdata (pdev); 1879 struct net_device *dev = pci_get_drvdata(pdev);
1893 /*struct netdrv_private *tp = netdev_priv(dev);*/ 1880 /*struct netdrv_private *tp = netdev_priv(dev);*/
1894 1881
1895 if (!netif_running(dev)) 1882 if (!netif_running(dev))
1896 return 0; 1883 return 0;
1897 pci_set_power_state (pdev, PCI_D0); 1884 pci_set_power_state(pdev, PCI_D0);
1898 pci_restore_state (pdev); 1885 pci_restore_state(pdev);
1899 netif_device_attach (dev); 1886 netif_device_attach(dev);
1900 netdrv_hw_start (dev); 1887 netdrv_hw_start(dev);
1901 1888
1902 return 0; 1889 return 0;
1903} 1890}
@@ -1917,7 +1904,7 @@ static struct pci_driver netdrv_pci_driver = {
1917}; 1904};
1918 1905
1919 1906
1920static int __init netdrv_init_module (void) 1907static int __init netdrv_init_module(void)
1921{ 1908{
1922/* when a module, this is printed whether or not devices are found in probe */ 1909/* when a module, this is printed whether or not devices are found in probe */
1923#ifdef MODULE 1910#ifdef MODULE
@@ -1927,9 +1914,9 @@ static int __init netdrv_init_module (void)
1927} 1914}
1928 1915
1929 1916
1930static void __exit netdrv_cleanup_module (void) 1917static void __exit netdrv_cleanup_module(void)
1931{ 1918{
1932 pci_unregister_driver (&netdrv_pci_driver); 1919 pci_unregister_driver(&netdrv_pci_driver);
1933} 1920}
1934 1921
1935 1922
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 98938ea9e0bd..3d1d3a7b7ed3 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -1148,7 +1148,7 @@ static void set_rx_mode(struct net_device *dev)
1148 if (dev->flags & IFF_PROMISC) 1148 if (dev->flags & IFF_PROMISC)
1149 outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm, 1149 outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
1150 ioaddr + EL3_CMD); 1150 ioaddr + EL3_CMD);
1151 else if (dev->mc_count || (dev->flags & IFF_ALLMULTI)) 1151 else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
1152 outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD); 1152 outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
1153 else 1153 else
1154 outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD); 1154 outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 322e11df0097..091e0b00043e 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -886,7 +886,7 @@ static void set_rx_mode(struct net_device *dev)
886 886
887 if (dev->flags & IFF_PROMISC) 887 if (dev->flags & IFF_PROMISC)
888 opts |= RxMulticast | RxProm; 888 opts |= RxMulticast | RxProm;
889 else if (dev->mc_count || (dev->flags & IFF_ALLMULTI)) 889 else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
890 opts |= RxMulticast; 890 opts |= RxMulticast;
891 outw(opts, ioaddr + EL3_CMD); 891 outw(opts, ioaddr + EL3_CMD);
892} 892}
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index d431b59e7d11..09291e60d309 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -779,6 +779,7 @@ static struct pcmcia_device_id axnet_ids[] = {
779 PCMCIA_DEVICE_PROD_ID12("CNet", "CNF301", 0xbc477dde, 0x78c5f40b), 779 PCMCIA_DEVICE_PROD_ID12("CNet", "CNF301", 0xbc477dde, 0x78c5f40b),
780 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXD", 0x5261440f, 0x436768c5), 780 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXD", 0x5261440f, 0x436768c5),
781 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEtherII PCC-TXD", 0x5261440f, 0x730df72e), 781 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEtherII PCC-TXD", 0x5261440f, 0x730df72e),
782 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXM", 0x5261440f, 0x3abbd061),
782 PCMCIA_DEVICE_PROD_ID12("Dynalink", "L100C16", 0x55632fd5, 0x66bc2a90), 783 PCMCIA_DEVICE_PROD_ID12("Dynalink", "L100C16", 0x55632fd5, 0x66bc2a90),
783 PCMCIA_DEVICE_PROD_ID12("IO DATA", "ETXPCM", 0x547e66dc, 0x233adac2), 784 PCMCIA_DEVICE_PROD_ID12("IO DATA", "ETXPCM", 0x547e66dc, 0x233adac2),
784 PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V3)", 0x0733cc81, 0x232019a8), 785 PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V3)", 0x0733cc81, 0x232019a8),
@@ -1065,14 +1066,11 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
1065 1066
1066 spin_lock_irqsave(&ei_local->page_lock, flags); 1067 spin_lock_irqsave(&ei_local->page_lock, flags);
1067 outb_p(0x00, e8390_base + EN0_IMR); 1068 outb_p(0x00, e8390_base + EN0_IMR);
1068 spin_unlock_irqrestore(&ei_local->page_lock, flags);
1069 1069
1070 /* 1070 /*
1071 * Slow phase with lock held. 1071 * Slow phase with lock held.
1072 */ 1072 */
1073 1073
1074 spin_lock_irqsave(&ei_local->page_lock, flags);
1075
1076 ei_local->irqlock = 1; 1074 ei_local->irqlock = 1;
1077 1075
1078 send_length = max(length, ETH_ZLEN); 1076 send_length = max(length, ETH_ZLEN);
@@ -1628,8 +1626,7 @@ static inline void make_mc_bits(u8 *bits, struct net_device *dev)
1628 struct dev_mc_list *dmi; 1626 struct dev_mc_list *dmi;
1629 u32 crc; 1627 u32 crc;
1630 1628
1631 for (dmi=dev->mc_list; dmi; dmi=dmi->next) { 1629 netdev_for_each_mc_addr(dmi, dev) {
1632
1633 crc = ether_crc(ETH_ALEN, dmi->dmi_addr); 1630 crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
1634 /* 1631 /*
1635 * The 8390 uses the 6 most significant bits of the 1632 * The 8390 uses the 6 most significant bits of the
@@ -1655,7 +1652,7 @@ static void do_set_multicast_list(struct net_device *dev)
1655 1652
1656 if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) { 1653 if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
1657 memset(ei_local->mcfilter, 0, 8); 1654 memset(ei_local->mcfilter, 0, 8);
1658 if (dev->mc_list) 1655 if (!netdev_mc_empty(dev))
1659 make_mc_bits(ei_local->mcfilter, dev); 1656 make_mc_bits(ei_local->mcfilter, dev);
1660 } else { 1657 } else {
1661 /* set to accept-all */ 1658 /* set to accept-all */
@@ -1671,7 +1668,7 @@ static void do_set_multicast_list(struct net_device *dev)
1671 1668
1672 if(dev->flags&IFF_PROMISC) 1669 if(dev->flags&IFF_PROMISC)
1673 outb_p(E8390_RXCONFIG | 0x58, e8390_base + EN0_RXCR); 1670 outb_p(E8390_RXCONFIG | 0x58, e8390_base + EN0_RXCR);
1674 else if(dev->flags&IFF_ALLMULTI || dev->mc_list) 1671 else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
1675 outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR); 1672 outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR);
1676 else 1673 else
1677 outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR); 1674 outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR);
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 7b17404d0858..b9dc80b9d04a 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -1187,22 +1187,20 @@ static void set_rx_mode(struct net_device *dev)
1187 if (dev->flags & IFF_PROMISC) { 1187 if (dev->flags & IFF_PROMISC) {
1188 memset(mc_filter, 0xff, sizeof(mc_filter)); 1188 memset(mc_filter, 0xff, sizeof(mc_filter));
1189 outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */ 1189 outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
1190 } else if (dev->mc_count > MC_FILTERBREAK || 1190 } else if (netdev_mc_count(dev) > MC_FILTERBREAK ||
1191 (dev->flags & IFF_ALLMULTI)) { 1191 (dev->flags & IFF_ALLMULTI)) {
1192 /* Too many to filter perfectly -- accept all multicasts. */ 1192 /* Too many to filter perfectly -- accept all multicasts. */
1193 memset(mc_filter, 0xff, sizeof(mc_filter)); 1193 memset(mc_filter, 0xff, sizeof(mc_filter));
1194 outb(2, ioaddr + RX_MODE); /* Use normal mode. */ 1194 outb(2, ioaddr + RX_MODE); /* Use normal mode. */
1195 } else if (dev->mc_count == 0) { 1195 } else if (netdev_mc_empty(dev)) {
1196 memset(mc_filter, 0x00, sizeof(mc_filter)); 1196 memset(mc_filter, 0x00, sizeof(mc_filter));
1197 outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */ 1197 outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
1198 } else { 1198 } else {
1199 struct dev_mc_list *mclist; 1199 struct dev_mc_list *mclist;
1200 1200
1201 memset(mc_filter, 0, sizeof(mc_filter)); 1201 memset(mc_filter, 0, sizeof(mc_filter));
1202 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 1202 netdev_for_each_mc_addr(mclist, dev) {
1203 i++, mclist = mclist->next) { 1203 unsigned int bit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
1204 unsigned int bit =
1205 ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
1206 mc_filter[bit >> 3] |= (1 << (bit & 7)); 1204 mc_filter[bit >> 3] |= (1 << (bit & 7));
1207 } 1205 }
1208 outb(2, ioaddr + RX_MODE); /* Use normal mode. */ 1206 outb(2, ioaddr + RX_MODE); /* Use normal mode. */
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 12e3233868e9..c717b143f11a 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -1475,14 +1475,13 @@ static void set_multicast_list(struct net_device *dev)
1475{ 1475{
1476 mace_private *lp = netdev_priv(dev); 1476 mace_private *lp = netdev_priv(dev);
1477 int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */ 1477 int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */
1478 int i; 1478 struct dev_mc_list *dmi;
1479 struct dev_mc_list *dmi = dev->mc_list;
1480 1479
1481#ifdef PCMCIA_DEBUG 1480#ifdef PCMCIA_DEBUG
1482 { 1481 {
1483 static int old; 1482 static int old;
1484 if (dev->mc_count != old) { 1483 if (netdev_mc_count(dev) != old) {
1485 old = dev->mc_count; 1484 old = netdev_mc_count(dev);
1486 pr_debug("%s: setting Rx mode to %d addresses.\n", 1485 pr_debug("%s: setting Rx mode to %d addresses.\n",
1487 dev->name, old); 1486 dev->name, old);
1488 } 1487 }
@@ -1490,15 +1489,14 @@ static void set_multicast_list(struct net_device *dev)
1490#endif 1489#endif
1491 1490
1492 /* Set multicast_num_addrs. */ 1491 /* Set multicast_num_addrs. */
1493 lp->multicast_num_addrs = dev->mc_count; 1492 lp->multicast_num_addrs = netdev_mc_count(dev);
1494 1493
1495 /* Set multicast_ladrf. */ 1494 /* Set multicast_ladrf. */
1496 if (num_addrs > 0) { 1495 if (num_addrs > 0) {
1497 /* Calculate multicast logical address filter */ 1496 /* Calculate multicast logical address filter */
1498 memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN); 1497 memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
1499 for (i = 0; i < dev->mc_count; i++) { 1498 netdev_for_each_mc_addr(dmi, dev) {
1500 memcpy(adr, dmi->dmi_addr, ETHER_ADDR_LEN); 1499 memcpy(adr, dmi->dmi_addr, ETHER_ADDR_LEN);
1501 dmi = dmi->next;
1502 BuildLAF(lp->multicast_ladrf, adr); 1500 BuildLAF(lp->multicast_ladrf, adr);
1503 } 1501 }
1504 } 1502 }
@@ -1537,15 +1535,15 @@ static void set_multicast_list(struct net_device *dev)
1537#ifdef PCMCIA_DEBUG 1535#ifdef PCMCIA_DEBUG
1538 { 1536 {
1539 static int old; 1537 static int old;
1540 if (dev->mc_count != old) { 1538 if (netdev_mc_count(dev) != old) {
1541 old = dev->mc_count; 1539 old = netdev_mc_count(dev);
1542 pr_debug("%s: setting Rx mode to %d addresses.\n", 1540 pr_debug("%s: setting Rx mode to %d addresses.\n",
1543 dev->name, old); 1541 dev->name, old);
1544 } 1542 }
1545 } 1543 }
1546#endif 1544#endif
1547 1545
1548 lp->multicast_num_addrs = dev->mc_count; 1546 lp->multicast_num_addrs = netdev_mc_count(dev);
1549 restore_multicast_list(dev); 1547 restore_multicast_list(dev);
1550 1548
1551} /* set_multicast_list */ 1549} /* set_multicast_list */
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index aa57cfd1e3fb..5adc662c4bfb 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1593,27 +1593,6 @@ static void smc_rx(struct net_device *dev)
1593 1593
1594/*====================================================================== 1594/*======================================================================
1595 1595
1596 Calculate values for the hardware multicast filter hash table.
1597
1598======================================================================*/
1599
1600static void fill_multicast_tbl(int count, struct dev_mc_list *addrs,
1601 u_char *multicast_table)
1602{
1603 struct dev_mc_list *mc_addr;
1604
1605 for (mc_addr = addrs; mc_addr && count-- > 0; mc_addr = mc_addr->next) {
1606 u_int position = ether_crc(6, mc_addr->dmi_addr);
1607#ifndef final_version /* Verify multicast address. */
1608 if ((mc_addr->dmi_addr[0] & 1) == 0)
1609 continue;
1610#endif
1611 multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
1612 }
1613}
1614
1615/*======================================================================
1616
1617 Set the receive mode. 1596 Set the receive mode.
1618 1597
1619 This routine is used by both the protocol level to notify us of 1598 This routine is used by both the protocol level to notify us of
@@ -1636,9 +1615,17 @@ static void set_rx_mode(struct net_device *dev)
1636 } else if (dev->flags & IFF_ALLMULTI) 1615 } else if (dev->flags & IFF_ALLMULTI)
1637 rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti; 1616 rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti;
1638 else { 1617 else {
1639 if (dev->mc_count) { 1618 if (!netdev_mc_empty(dev)) {
1640 fill_multicast_tbl(dev->mc_count, dev->mc_list, 1619 struct dev_mc_list *mc_addr;
1641 (u_char *)multicast_table); 1620
1621 netdev_for_each_mc_addr(mc_addr, dev) {
1622 u_int position = ether_crc(6, mc_addr->dmi_addr);
1623#ifndef final_version /* Verify multicast address. */
1624 if ((mc_addr->dmi_addr[0] & 1) == 0)
1625 continue;
1626#endif
1627 multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
1628 }
1642 } 1629 }
1643 rx_cfg_setting = RxStripCRC | RxEnable; 1630 rx_cfg_setting = RxStripCRC | RxEnable;
1644 } 1631 }
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 466fc72698c0..4d1802e457be 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -1364,47 +1364,63 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
1364 return NETDEV_TX_OK; 1364 return NETDEV_TX_OK;
1365} 1365}
1366 1366
1367struct set_address_info {
1368 int reg_nr;
1369 int page_nr;
1370 int mohawk;
1371 unsigned int ioaddr;
1372};
1373
1374static void set_address(struct set_address_info *sa_info, char *addr)
1375{
1376 unsigned int ioaddr = sa_info->ioaddr;
1377 int i;
1378
1379 for (i = 0; i < 6; i++) {
1380 if (sa_info->reg_nr > 15) {
1381 sa_info->reg_nr = 8;
1382 sa_info->page_nr++;
1383 SelectPage(sa_info->page_nr);
1384 }
1385 if (sa_info->mohawk)
1386 PutByte(sa_info->reg_nr++, addr[5 - i]);
1387 else
1388 PutByte(sa_info->reg_nr++, addr[i]);
1389 }
1390}
1391
1367/**************** 1392/****************
1368 * Set all addresses: This first one is the individual address, 1393 * Set all addresses: This first one is the individual address,
1369 * the next 9 addresses are taken from the multicast list and 1394 * the next 9 addresses are taken from the multicast list and
1370 * the rest is filled with the individual address. 1395 * the rest is filled with the individual address.
1371 */ 1396 */
1372static void 1397static void set_addresses(struct net_device *dev)
1373set_addresses(struct net_device *dev)
1374{ 1398{
1375 unsigned int ioaddr = dev->base_addr; 1399 unsigned int ioaddr = dev->base_addr;
1376 local_info_t *lp = netdev_priv(dev); 1400 local_info_t *lp = netdev_priv(dev);
1377 struct dev_mc_list *dmi = dev->mc_list; 1401 struct dev_mc_list *dmi;
1378 unsigned char *addr; 1402 struct set_address_info sa_info;
1379 int i,j,k,n; 1403 int i;
1380
1381 SelectPage(k=0x50);
1382 for (i=0,j=8,n=0; ; i++, j++) {
1383 if (i > 5) {
1384 if (++n > 9)
1385 break;
1386 i = 0;
1387 if (n > 1 && n <= dev->mc_count && dmi) {
1388 dmi = dmi->next;
1389 }
1390 }
1391 if (j > 15) {
1392 j = 8;
1393 k++;
1394 SelectPage(k);
1395 }
1396
1397 if (n && n <= dev->mc_count && dmi)
1398 addr = dmi->dmi_addr;
1399 else
1400 addr = dev->dev_addr;
1401 1404
1402 if (lp->mohawk) 1405 /*
1403 PutByte(j, addr[5-i]); 1406 * Setup the info structure so that by first set_address call it will do
1404 else 1407 * SelectPage with the right page number. Hence these ones here.
1405 PutByte(j, addr[i]); 1408 */
1406 } 1409 sa_info.reg_nr = 15 + 1;
1407 SelectPage(0); 1410 sa_info.page_nr = 0x50 - 1;
1411 sa_info.mohawk = lp->mohawk;
1412 sa_info.ioaddr = ioaddr;
1413
1414 set_address(&sa_info, dev->dev_addr);
1415 i = 0;
1416 netdev_for_each_mc_addr(dmi, dev) {
1417 if (i++ == 9)
1418 break;
1419 set_address(&sa_info, dmi->dmi_addr);
1420 }
1421 while (i++ < 9)
1422 set_address(&sa_info, dev->dev_addr);
1423 SelectPage(0);
1408} 1424}
1409 1425
1410/**************** 1426/****************
@@ -1424,9 +1440,9 @@ set_multicast_list(struct net_device *dev)
1424 1440
1425 if (dev->flags & IFF_PROMISC) { /* snoop */ 1441 if (dev->flags & IFF_PROMISC) { /* snoop */
1426 PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */ 1442 PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */
1427 } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) { 1443 } else if (netdev_mc_count(dev) > 9 || (dev->flags & IFF_ALLMULTI)) {
1428 PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */ 1444 PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */
1429 } else if (dev->mc_count) { 1445 } else if (!netdev_mc_empty(dev)) {
1430 /* the chip can filter 9 addresses perfectly */ 1446 /* the chip can filter 9 addresses perfectly */
1431 PutByte(XIRCREG42_SWC1, value | 0x01); 1447 PutByte(XIRCREG42_SWC1, value | 0x01);
1432 SelectPage(0x40); 1448 SelectPage(0x40);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index e154677ff706..084d78dd1637 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -21,6 +21,8 @@
21 * 21 *
22 *************************************************************************/ 22 *************************************************************************/
23 23
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
24#define DRV_NAME "pcnet32" 26#define DRV_NAME "pcnet32"
25#define DRV_VERSION "1.35" 27#define DRV_VERSION "1.35"
26#define DRV_RELDATE "21.Apr.2008" 28#define DRV_RELDATE "21.Apr.2008"
@@ -50,16 +52,16 @@ static const char *const version =
50#include <linux/spinlock.h> 52#include <linux/spinlock.h>
51#include <linux/moduleparam.h> 53#include <linux/moduleparam.h>
52#include <linux/bitops.h> 54#include <linux/bitops.h>
55#include <linux/io.h>
56#include <linux/uaccess.h>
53 57
54#include <asm/dma.h> 58#include <asm/dma.h>
55#include <asm/io.h>
56#include <asm/uaccess.h>
57#include <asm/irq.h> 59#include <asm/irq.h>
58 60
59/* 61/*
60 * PCI device identifiers for "new style" Linux PCI Device Drivers 62 * PCI device identifiers for "new style" Linux PCI Device Drivers
61 */ 63 */
62static struct pci_device_id pcnet32_pci_tbl[] = { 64static DEFINE_PCI_DEVICE_TABLE(pcnet32_pci_tbl) = {
63 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), }, 65 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
64 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), }, 66 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
65 67
@@ -83,7 +85,7 @@ static int cards_found;
83static unsigned int pcnet32_portlist[] __initdata = 85static unsigned int pcnet32_portlist[] __initdata =
84 { 0x300, 0x320, 0x340, 0x360, 0 }; 86 { 0x300, 0x320, 0x340, 0x360, 0 };
85 87
86static int pcnet32_debug = 0; 88static int pcnet32_debug;
87static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */ 89static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
88static int pcnet32vlb; /* check for VLB cards ? */ 90static int pcnet32vlb; /* check for VLB cards ? */
89 91
@@ -390,7 +392,7 @@ static struct pcnet32_access pcnet32_wio = {
390static u16 pcnet32_dwio_read_csr(unsigned long addr, int index) 392static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
391{ 393{
392 outl(index, addr + PCNET32_DWIO_RAP); 394 outl(index, addr + PCNET32_DWIO_RAP);
393 return (inl(addr + PCNET32_DWIO_RDP) & 0xffff); 395 return inl(addr + PCNET32_DWIO_RDP) & 0xffff;
394} 396}
395 397
396static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val) 398static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
@@ -402,7 +404,7 @@ static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
402static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index) 404static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
403{ 405{
404 outl(index, addr + PCNET32_DWIO_RAP); 406 outl(index, addr + PCNET32_DWIO_RAP);
405 return (inl(addr + PCNET32_DWIO_BDP) & 0xffff); 407 return inl(addr + PCNET32_DWIO_BDP) & 0xffff;
406} 408}
407 409
408static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val) 410static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
@@ -413,7 +415,7 @@ static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
413 415
414static u16 pcnet32_dwio_read_rap(unsigned long addr) 416static u16 pcnet32_dwio_read_rap(unsigned long addr)
415{ 417{
416 return (inl(addr + PCNET32_DWIO_RAP) & 0xffff); 418 return inl(addr + PCNET32_DWIO_RAP) & 0xffff;
417} 419}
418 420
419static void pcnet32_dwio_write_rap(unsigned long addr, u16 val) 421static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
@@ -487,10 +489,7 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
487 (1 << size), 489 (1 << size),
488 &new_ring_dma_addr); 490 &new_ring_dma_addr);
489 if (new_tx_ring == NULL) { 491 if (new_tx_ring == NULL) {
490 if (netif_msg_drv(lp)) 492 netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
491 printk(KERN_ERR
492 "%s: Consistent memory allocation failed.\n",
493 dev->name);
494 return; 493 return;
495 } 494 }
496 memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size)); 495 memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
@@ -498,18 +497,14 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
498 new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t), 497 new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
499 GFP_ATOMIC); 498 GFP_ATOMIC);
500 if (!new_dma_addr_list) { 499 if (!new_dma_addr_list) {
501 if (netif_msg_drv(lp)) 500 netif_err(lp, drv, dev, "Memory allocation failed\n");
502 printk(KERN_ERR
503 "%s: Memory allocation failed.\n", dev->name);
504 goto free_new_tx_ring; 501 goto free_new_tx_ring;
505 } 502 }
506 503
507 new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *), 504 new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
508 GFP_ATOMIC); 505 GFP_ATOMIC);
509 if (!new_skb_list) { 506 if (!new_skb_list) {
510 if (netif_msg_drv(lp)) 507 netif_err(lp, drv, dev, "Memory allocation failed\n");
511 printk(KERN_ERR
512 "%s: Memory allocation failed.\n", dev->name);
513 goto free_new_lists; 508 goto free_new_lists;
514 } 509 }
515 510
@@ -529,15 +524,14 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
529 lp->tx_skbuff = new_skb_list; 524 lp->tx_skbuff = new_skb_list;
530 return; 525 return;
531 526
532 free_new_lists: 527free_new_lists:
533 kfree(new_dma_addr_list); 528 kfree(new_dma_addr_list);
534 free_new_tx_ring: 529free_new_tx_ring:
535 pci_free_consistent(lp->pci_dev, 530 pci_free_consistent(lp->pci_dev,
536 sizeof(struct pcnet32_tx_head) * 531 sizeof(struct pcnet32_tx_head) *
537 (1 << size), 532 (1 << size),
538 new_tx_ring, 533 new_tx_ring,
539 new_ring_dma_addr); 534 new_ring_dma_addr);
540 return;
541} 535}
542 536
543/* 537/*
@@ -565,10 +559,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
565 (1 << size), 559 (1 << size),
566 &new_ring_dma_addr); 560 &new_ring_dma_addr);
567 if (new_rx_ring == NULL) { 561 if (new_rx_ring == NULL) {
568 if (netif_msg_drv(lp)) 562 netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
569 printk(KERN_ERR
570 "%s: Consistent memory allocation failed.\n",
571 dev->name);
572 return; 563 return;
573 } 564 }
574 memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size)); 565 memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
@@ -576,18 +567,14 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
576 new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t), 567 new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
577 GFP_ATOMIC); 568 GFP_ATOMIC);
578 if (!new_dma_addr_list) { 569 if (!new_dma_addr_list) {
579 if (netif_msg_drv(lp)) 570 netif_err(lp, drv, dev, "Memory allocation failed\n");
580 printk(KERN_ERR
581 "%s: Memory allocation failed.\n", dev->name);
582 goto free_new_rx_ring; 571 goto free_new_rx_ring;
583 } 572 }
584 573
585 new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *), 574 new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
586 GFP_ATOMIC); 575 GFP_ATOMIC);
587 if (!new_skb_list) { 576 if (!new_skb_list) {
588 if (netif_msg_drv(lp)) 577 netif_err(lp, drv, dev, "Memory allocation failed\n");
589 printk(KERN_ERR
590 "%s: Memory allocation failed.\n", dev->name);
591 goto free_new_lists; 578 goto free_new_lists;
592 } 579 }
593 580
@@ -599,15 +586,14 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
599 new_skb_list[new] = lp->rx_skbuff[new]; 586 new_skb_list[new] = lp->rx_skbuff[new];
600 } 587 }
601 /* now allocate any new buffers needed */ 588 /* now allocate any new buffers needed */
602 for (; new < size; new++ ) { 589 for (; new < size; new++) {
603 struct sk_buff *rx_skbuff; 590 struct sk_buff *rx_skbuff;
604 new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB); 591 new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB);
605 if (!(rx_skbuff = new_skb_list[new])) { 592 rx_skbuff = new_skb_list[new];
593 if (!rx_skbuff) {
606 /* keep the original lists and buffers */ 594 /* keep the original lists and buffers */
607 if (netif_msg_drv(lp)) 595 netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n",
608 printk(KERN_ERR 596 __func__);
609 "%s: pcnet32_realloc_rx_ring dev_alloc_skb failed.\n",
610 dev->name);
611 goto free_all_new; 597 goto free_all_new;
612 } 598 }
613 skb_reserve(rx_skbuff, NET_IP_ALIGN); 599 skb_reserve(rx_skbuff, NET_IP_ALIGN);
@@ -644,8 +630,8 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
644 lp->rx_skbuff = new_skb_list; 630 lp->rx_skbuff = new_skb_list;
645 return; 631 return;
646 632
647 free_all_new: 633free_all_new:
648 for (; --new >= lp->rx_ring_size; ) { 634 while (--new >= lp->rx_ring_size) {
649 if (new_skb_list[new]) { 635 if (new_skb_list[new]) {
650 pci_unmap_single(lp->pci_dev, new_dma_addr_list[new], 636 pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
651 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); 637 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
@@ -653,9 +639,9 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
653 } 639 }
654 } 640 }
655 kfree(new_skb_list); 641 kfree(new_skb_list);
656 free_new_lists: 642free_new_lists:
657 kfree(new_dma_addr_list); 643 kfree(new_dma_addr_list);
658 free_new_rx_ring: 644free_new_rx_ring:
659 pci_free_consistent(lp->pci_dev, 645 pci_free_consistent(lp->pci_dev,
660 sizeof(struct pcnet32_rx_head) * 646 sizeof(struct pcnet32_rx_head) *
661 (1 << size), 647 (1 << size),
@@ -838,16 +824,14 @@ static int pcnet32_set_ringparam(struct net_device *dev,
838 824
839 spin_unlock_irqrestore(&lp->lock, flags); 825 spin_unlock_irqrestore(&lp->lock, flags);
840 826
841 if (netif_msg_drv(lp)) 827 netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
842 printk(KERN_INFO 828 lp->rx_ring_size, lp->tx_ring_size);
843 "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name,
844 lp->rx_ring_size, lp->tx_ring_size);
845 829
846 return 0; 830 return 0;
847} 831}
848 832
849static void pcnet32_get_strings(struct net_device *dev, u32 stringset, 833static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
850 u8 * data) 834 u8 *data)
851{ 835{
852 memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test)); 836 memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
853} 837}
@@ -871,17 +855,15 @@ static void pcnet32_ethtool_test(struct net_device *dev,
871 if (test->flags == ETH_TEST_FL_OFFLINE) { 855 if (test->flags == ETH_TEST_FL_OFFLINE) {
872 rc = pcnet32_loopback_test(dev, data); 856 rc = pcnet32_loopback_test(dev, data);
873 if (rc) { 857 if (rc) {
874 if (netif_msg_hw(lp)) 858 netif_printk(lp, hw, KERN_DEBUG, dev,
875 printk(KERN_DEBUG "%s: Loopback test failed.\n", 859 "Loopback test failed\n");
876 dev->name);
877 test->flags |= ETH_TEST_FL_FAILED; 860 test->flags |= ETH_TEST_FL_FAILED;
878 } else if (netif_msg_hw(lp)) 861 } else
879 printk(KERN_DEBUG "%s: Loopback test passed.\n", 862 netif_printk(lp, hw, KERN_DEBUG, dev,
880 dev->name); 863 "Loopback test passed\n");
881 } else if (netif_msg_hw(lp)) 864 } else
882 printk(KERN_DEBUG 865 netif_printk(lp, hw, KERN_DEBUG, dev,
883 "%s: No tests to run (specify 'Offline' on ethtool).", 866 "No tests to run (specify 'Offline' on ethtool)\n");
884 dev->name);
885} /* end pcnet32_ethtool_test */ 867} /* end pcnet32_ethtool_test */
886 868
887static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) 869static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
@@ -926,40 +908,39 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
926 /* Initialize Transmit buffers. */ 908 /* Initialize Transmit buffers. */
927 size = data_len + 15; 909 size = data_len + 15;
928 for (x = 0; x < numbuffs; x++) { 910 for (x = 0; x < numbuffs; x++) {
929 if (!(skb = dev_alloc_skb(size))) { 911 skb = dev_alloc_skb(size);
930 if (netif_msg_hw(lp)) 912 if (!skb) {
931 printk(KERN_DEBUG 913 netif_printk(lp, hw, KERN_DEBUG, dev,
932 "%s: Cannot allocate skb at line: %d!\n", 914 "Cannot allocate skb at line: %d!\n",
933 dev->name, __LINE__); 915 __LINE__);
934 goto clean_up; 916 goto clean_up;
935 } else {
936 packet = skb->data;
937 skb_put(skb, size); /* create space for data */
938 lp->tx_skbuff[x] = skb;
939 lp->tx_ring[x].length = cpu_to_le16(-skb->len);
940 lp->tx_ring[x].misc = 0;
941
942 /* put DA and SA into the skb */
943 for (i = 0; i < 6; i++)
944 *packet++ = dev->dev_addr[i];
945 for (i = 0; i < 6; i++)
946 *packet++ = dev->dev_addr[i];
947 /* type */
948 *packet++ = 0x08;
949 *packet++ = 0x06;
950 /* packet number */
951 *packet++ = x;
952 /* fill packet with data */
953 for (i = 0; i < data_len; i++)
954 *packet++ = i;
955
956 lp->tx_dma_addr[x] =
957 pci_map_single(lp->pci_dev, skb->data, skb->len,
958 PCI_DMA_TODEVICE);
959 lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
960 wmb(); /* Make sure owner changes after all others are visible */
961 lp->tx_ring[x].status = cpu_to_le16(status);
962 } 917 }
918 packet = skb->data;
919 skb_put(skb, size); /* create space for data */
920 lp->tx_skbuff[x] = skb;
921 lp->tx_ring[x].length = cpu_to_le16(-skb->len);
922 lp->tx_ring[x].misc = 0;
923
924 /* put DA and SA into the skb */
925 for (i = 0; i < 6; i++)
926 *packet++ = dev->dev_addr[i];
927 for (i = 0; i < 6; i++)
928 *packet++ = dev->dev_addr[i];
929 /* type */
930 *packet++ = 0x08;
931 *packet++ = 0x06;
932 /* packet number */
933 *packet++ = x;
934 /* fill packet with data */
935 for (i = 0; i < data_len; i++)
936 *packet++ = i;
937
938 lp->tx_dma_addr[x] =
939 pci_map_single(lp->pci_dev, skb->data, skb->len,
940 PCI_DMA_TODEVICE);
941 lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
942 wmb(); /* Make sure owner changes after all others are visible */
943 lp->tx_ring[x].status = cpu_to_le16(status);
963 } 944 }
964 945
965 x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */ 946 x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */
@@ -984,9 +965,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
984 ticks++; 965 ticks++;
985 } 966 }
986 if (ticks == 200) { 967 if (ticks == 200) {
987 if (netif_msg_hw(lp)) 968 netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x);
988 printk("%s: Desc %d failed to reset!\n",
989 dev->name, x);
990 break; 969 break;
991 } 970 }
992 } 971 }
@@ -994,15 +973,14 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
994 lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ 973 lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
995 wmb(); 974 wmb();
996 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) { 975 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
997 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name); 976 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
998 977
999 for (x = 0; x < numbuffs; x++) { 978 for (x = 0; x < numbuffs; x++) {
1000 printk(KERN_DEBUG "%s: Packet %d:\n", dev->name, x); 979 netdev_printk(KERN_DEBUG, dev, "Packet %d: ", x);
1001 skb = lp->rx_skbuff[x]; 980 skb = lp->rx_skbuff[x];
1002 for (i = 0; i < size; i++) { 981 for (i = 0; i < size; i++)
1003 printk("%02x ", *(skb->data + i)); 982 pr_cont(" %02x", *(skb->data + i));
1004 } 983 pr_cont("\n");
1005 printk("\n");
1006 } 984 }
1007 } 985 }
1008 986
@@ -1013,11 +991,9 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
1013 packet = lp->tx_skbuff[x]->data; 991 packet = lp->tx_skbuff[x]->data;
1014 for (i = 0; i < size; i++) { 992 for (i = 0; i < size; i++) {
1015 if (*(skb->data + i) != packet[i]) { 993 if (*(skb->data + i) != packet[i]) {
1016 if (netif_msg_hw(lp)) 994 netif_printk(lp, hw, KERN_DEBUG, dev,
1017 printk(KERN_DEBUG 995 "Error in compare! %2x - %02x %02x\n",
1018 "%s: Error in compare! %2x - %02x %02x\n", 996 i, *(skb->data + i), packet[i]);
1019 dev->name, i, *(skb->data + i),
1020 packet[i]);
1021 rc = 1; 997 rc = 1;
1022 break; 998 break;
1023 } 999 }
@@ -1025,7 +1001,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
1025 x++; 1001 x++;
1026 } 1002 }
1027 1003
1028 clean_up: 1004clean_up:
1029 *data1 = rc; 1005 *data1 = rc;
1030 pcnet32_purge_tx_ring(dev); 1006 pcnet32_purge_tx_ring(dev);
1031 1007
@@ -1044,7 +1020,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
1044 } 1020 }
1045 spin_unlock_irqrestore(&lp->lock, flags); 1021 spin_unlock_irqrestore(&lp->lock, flags);
1046 1022
1047 return (rc); 1023 return rc;
1048} /* end pcnet32_loopback_test */ 1024} /* end pcnet32_loopback_test */
1049 1025
1050static void pcnet32_led_blink_callback(struct net_device *dev) 1026static void pcnet32_led_blink_callback(struct net_device *dev)
@@ -1056,9 +1032,8 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
1056 int i; 1032 int i;
1057 1033
1058 spin_lock_irqsave(&lp->lock, flags); 1034 spin_lock_irqsave(&lp->lock, flags);
1059 for (i = 4; i < 8; i++) { 1035 for (i = 4; i < 8; i++)
1060 a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000); 1036 a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
1061 }
1062 spin_unlock_irqrestore(&lp->lock, flags); 1037 spin_unlock_irqrestore(&lp->lock, flags);
1063 1038
1064 mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT); 1039 mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT);
@@ -1080,9 +1055,8 @@ static int pcnet32_phys_id(struct net_device *dev, u32 data)
1080 1055
1081 /* Save the current value of the bcrs */ 1056 /* Save the current value of the bcrs */
1082 spin_lock_irqsave(&lp->lock, flags); 1057 spin_lock_irqsave(&lp->lock, flags);
1083 for (i = 4; i < 8; i++) { 1058 for (i = 4; i < 8; i++)
1084 regs[i - 4] = a->read_bcr(ioaddr, i); 1059 regs[i - 4] = a->read_bcr(ioaddr, i);
1085 }
1086 spin_unlock_irqrestore(&lp->lock, flags); 1060 spin_unlock_irqrestore(&lp->lock, flags);
1087 1061
1088 mod_timer(&lp->blink_timer, jiffies); 1062 mod_timer(&lp->blink_timer, jiffies);
@@ -1097,9 +1071,8 @@ static int pcnet32_phys_id(struct net_device *dev, u32 data)
1097 1071
1098 /* Restore the original value of the bcrs */ 1072 /* Restore the original value of the bcrs */
1099 spin_lock_irqsave(&lp->lock, flags); 1073 spin_lock_irqsave(&lp->lock, flags);
1100 for (i = 4; i < 8; i++) { 1074 for (i = 4; i < 8; i++)
1101 a->write_bcr(ioaddr, i, regs[i - 4]); 1075 a->write_bcr(ioaddr, i, regs[i - 4]);
1102 }
1103 spin_unlock_irqrestore(&lp->lock, flags); 1076 spin_unlock_irqrestore(&lp->lock, flags);
1104 1077
1105 return 0; 1078 return 0;
@@ -1136,10 +1109,8 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
1136 spin_lock_irqsave(&lp->lock, *flags); 1109 spin_lock_irqsave(&lp->lock, *flags);
1137 ticks++; 1110 ticks++;
1138 if (ticks > 200) { 1111 if (ticks > 200) {
1139 if (netif_msg_hw(lp)) 1112 netif_printk(lp, hw, KERN_DEBUG, dev,
1140 printk(KERN_DEBUG 1113 "Error getting into suspend!\n");
1141 "%s: Error getting into suspend!\n",
1142 dev->name);
1143 return 0; 1114 return 0;
1144 } 1115 }
1145 } 1116 }
@@ -1184,15 +1155,13 @@ static void pcnet32_rx_entry(struct net_device *dev,
1184 1155
1185 /* Discard oversize frames. */ 1156 /* Discard oversize frames. */
1186 if (unlikely(pkt_len > PKT_BUF_SIZE)) { 1157 if (unlikely(pkt_len > PKT_BUF_SIZE)) {
1187 if (netif_msg_drv(lp)) 1158 netif_err(lp, drv, dev, "Impossible packet size %d!\n",
1188 printk(KERN_ERR "%s: Impossible packet size %d!\n", 1159 pkt_len);
1189 dev->name, pkt_len);
1190 dev->stats.rx_errors++; 1160 dev->stats.rx_errors++;
1191 return; 1161 return;
1192 } 1162 }
1193 if (pkt_len < 60) { 1163 if (pkt_len < 60) {
1194 if (netif_msg_rx_err(lp)) 1164 netif_err(lp, rx_err, dev, "Runt packet!\n");
1195 printk(KERN_ERR "%s: Runt packet!\n", dev->name);
1196 dev->stats.rx_errors++; 1165 dev->stats.rx_errors++;
1197 return; 1166 return;
1198 } 1167 }
@@ -1200,7 +1169,8 @@ static void pcnet32_rx_entry(struct net_device *dev,
1200 if (pkt_len > rx_copybreak) { 1169 if (pkt_len > rx_copybreak) {
1201 struct sk_buff *newskb; 1170 struct sk_buff *newskb;
1202 1171
1203 if ((newskb = dev_alloc_skb(PKT_BUF_SKB))) { 1172 newskb = dev_alloc_skb(PKT_BUF_SKB);
1173 if (newskb) {
1204 skb_reserve(newskb, NET_IP_ALIGN); 1174 skb_reserve(newskb, NET_IP_ALIGN);
1205 skb = lp->rx_skbuff[entry]; 1175 skb = lp->rx_skbuff[entry];
1206 pci_unmap_single(lp->pci_dev, 1176 pci_unmap_single(lp->pci_dev,
@@ -1218,15 +1188,11 @@ static void pcnet32_rx_entry(struct net_device *dev,
1218 rx_in_place = 1; 1188 rx_in_place = 1;
1219 } else 1189 } else
1220 skb = NULL; 1190 skb = NULL;
1221 } else { 1191 } else
1222 skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN); 1192 skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
1223 }
1224 1193
1225 if (skb == NULL) { 1194 if (skb == NULL) {
1226 if (netif_msg_drv(lp)) 1195 netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n");
1227 printk(KERN_ERR
1228 "%s: Memory squeeze, dropping packet.\n",
1229 dev->name);
1230 dev->stats.rx_dropped++; 1196 dev->stats.rx_dropped++;
1231 return; 1197 return;
1232 } 1198 }
@@ -1297,11 +1263,9 @@ static int pcnet32_tx(struct net_device *dev)
1297 /* There was a major error, log it. */ 1263 /* There was a major error, log it. */
1298 int err_status = le32_to_cpu(lp->tx_ring[entry].misc); 1264 int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
1299 dev->stats.tx_errors++; 1265 dev->stats.tx_errors++;
1300 if (netif_msg_tx_err(lp)) 1266 netif_err(lp, tx_err, dev,
1301 printk(KERN_ERR 1267 "Tx error status=%04x err_status=%08x\n",
1302 "%s: Tx error status=%04x err_status=%08x\n", 1268 status, err_status);
1303 dev->name, status,
1304 err_status);
1305 if (err_status & 0x04000000) 1269 if (err_status & 0x04000000)
1306 dev->stats.tx_aborted_errors++; 1270 dev->stats.tx_aborted_errors++;
1307 if (err_status & 0x08000000) 1271 if (err_status & 0x08000000)
@@ -1313,10 +1277,7 @@ static int pcnet32_tx(struct net_device *dev)
1313 dev->stats.tx_fifo_errors++; 1277 dev->stats.tx_fifo_errors++;
1314 /* Ackk! On FIFO errors the Tx unit is turned off! */ 1278 /* Ackk! On FIFO errors the Tx unit is turned off! */
1315 /* Remove this verbosity later! */ 1279 /* Remove this verbosity later! */
1316 if (netif_msg_tx_err(lp)) 1280 netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
1317 printk(KERN_ERR
1318 "%s: Tx FIFO error!\n",
1319 dev->name);
1320 must_restart = 1; 1281 must_restart = 1;
1321 } 1282 }
1322#else 1283#else
@@ -1325,10 +1286,7 @@ static int pcnet32_tx(struct net_device *dev)
1325 if (!lp->dxsuflo) { /* If controller doesn't recover ... */ 1286 if (!lp->dxsuflo) { /* If controller doesn't recover ... */
1326 /* Ackk! On FIFO errors the Tx unit is turned off! */ 1287 /* Ackk! On FIFO errors the Tx unit is turned off! */
1327 /* Remove this verbosity later! */ 1288 /* Remove this verbosity later! */
1328 if (netif_msg_tx_err(lp)) 1289 netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
1329 printk(KERN_ERR
1330 "%s: Tx FIFO error!\n",
1331 dev->name);
1332 must_restart = 1; 1290 must_restart = 1;
1333 } 1291 }
1334 } 1292 }
@@ -1354,11 +1312,8 @@ static int pcnet32_tx(struct net_device *dev)
1354 1312
1355 delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size); 1313 delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
1356 if (delta > lp->tx_ring_size) { 1314 if (delta > lp->tx_ring_size) {
1357 if (netif_msg_drv(lp)) 1315 netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
1358 printk(KERN_ERR 1316 dirty_tx, lp->cur_tx, lp->tx_full);
1359 "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1360 dev->name, dirty_tx, lp->cur_tx,
1361 lp->tx_full);
1362 dirty_tx += lp->tx_ring_size; 1317 dirty_tx += lp->tx_ring_size;
1363 delta -= lp->tx_ring_size; 1318 delta -= lp->tx_ring_size;
1364 } 1319 }
@@ -1421,7 +1376,7 @@ static int pcnet32_get_regs_len(struct net_device *dev)
1421 struct pcnet32_private *lp = netdev_priv(dev); 1376 struct pcnet32_private *lp = netdev_priv(dev);
1422 int j = lp->phycount * PCNET32_REGS_PER_PHY; 1377 int j = lp->phycount * PCNET32_REGS_PER_PHY;
1423 1378
1424 return ((PCNET32_NUM_REGS + j) * sizeof(u16)); 1379 return (PCNET32_NUM_REGS + j) * sizeof(u16);
1425} 1380}
1426 1381
1427static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, 1382static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
@@ -1445,21 +1400,20 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1445 *buff++ = inw(ioaddr + i); 1400 *buff++ = inw(ioaddr + i);
1446 1401
1447 /* read control and status registers */ 1402 /* read control and status registers */
1448 for (i = 0; i < 90; i++) { 1403 for (i = 0; i < 90; i++)
1449 *buff++ = a->read_csr(ioaddr, i); 1404 *buff++ = a->read_csr(ioaddr, i);
1450 }
1451 1405
1452 *buff++ = a->read_csr(ioaddr, 112); 1406 *buff++ = a->read_csr(ioaddr, 112);
1453 *buff++ = a->read_csr(ioaddr, 114); 1407 *buff++ = a->read_csr(ioaddr, 114);
1454 1408
1455 /* read bus configuration registers */ 1409 /* read bus configuration registers */
1456 for (i = 0; i < 30; i++) { 1410 for (i = 0; i < 30; i++)
1457 *buff++ = a->read_bcr(ioaddr, i); 1411 *buff++ = a->read_bcr(ioaddr, i);
1458 } 1412
1459 *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */ 1413 *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */
1460 for (i = 31; i < 36; i++) { 1414
1415 for (i = 31; i < 36; i++)
1461 *buff++ = a->read_bcr(ioaddr, i); 1416 *buff++ = a->read_bcr(ioaddr, i);
1462 }
1463 1417
1464 /* read mii phy registers */ 1418 /* read mii phy registers */
1465 if (lp->mii) { 1419 if (lp->mii) {
@@ -1535,8 +1489,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
1535 err = pci_enable_device(pdev); 1489 err = pci_enable_device(pdev);
1536 if (err < 0) { 1490 if (err < 0) {
1537 if (pcnet32_debug & NETIF_MSG_PROBE) 1491 if (pcnet32_debug & NETIF_MSG_PROBE)
1538 printk(KERN_ERR PFX 1492 pr_err("failed to enable device -- err=%d\n", err);
1539 "failed to enable device -- err=%d\n", err);
1540 return err; 1493 return err;
1541 } 1494 }
1542 pci_set_master(pdev); 1495 pci_set_master(pdev);
@@ -1544,29 +1497,25 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
1544 ioaddr = pci_resource_start(pdev, 0); 1497 ioaddr = pci_resource_start(pdev, 0);
1545 if (!ioaddr) { 1498 if (!ioaddr) {
1546 if (pcnet32_debug & NETIF_MSG_PROBE) 1499 if (pcnet32_debug & NETIF_MSG_PROBE)
1547 printk(KERN_ERR PFX 1500 pr_err("card has no PCI IO resources, aborting\n");
1548 "card has no PCI IO resources, aborting\n");
1549 return -ENODEV; 1501 return -ENODEV;
1550 } 1502 }
1551 1503
1552 if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) { 1504 if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
1553 if (pcnet32_debug & NETIF_MSG_PROBE) 1505 if (pcnet32_debug & NETIF_MSG_PROBE)
1554 printk(KERN_ERR PFX 1506 pr_err("architecture does not support 32bit PCI busmaster DMA\n");
1555 "architecture does not support 32bit PCI busmaster DMA\n");
1556 return -ENODEV; 1507 return -ENODEV;
1557 } 1508 }
1558 if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") == 1509 if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
1559 NULL) {
1560 if (pcnet32_debug & NETIF_MSG_PROBE) 1510 if (pcnet32_debug & NETIF_MSG_PROBE)
1561 printk(KERN_ERR PFX 1511 pr_err("io address range already allocated\n");
1562 "io address range already allocated\n");
1563 return -EBUSY; 1512 return -EBUSY;
1564 } 1513 }
1565 1514
1566 err = pcnet32_probe1(ioaddr, 1, pdev); 1515 err = pcnet32_probe1(ioaddr, 1, pdev);
1567 if (err < 0) { 1516 if (err < 0)
1568 pci_disable_device(pdev); 1517 pci_disable_device(pdev);
1569 } 1518
1570 return err; 1519 return err;
1571} 1520}
1572 1521
@@ -1616,7 +1565,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1616 a = &pcnet32_dwio; 1565 a = &pcnet32_dwio;
1617 } else { 1566 } else {
1618 if (pcnet32_debug & NETIF_MSG_PROBE) 1567 if (pcnet32_debug & NETIF_MSG_PROBE)
1619 printk(KERN_ERR PFX "No access methods\n"); 1568 pr_err("No access methods\n");
1620 goto err_release_region; 1569 goto err_release_region;
1621 } 1570 }
1622 } 1571 }
@@ -1624,11 +1573,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1624 chip_version = 1573 chip_version =
1625 a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16); 1574 a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
1626 if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW)) 1575 if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
1627 printk(KERN_INFO " PCnet chip version is %#x.\n", 1576 pr_info(" PCnet chip version is %#x\n", chip_version);
1628 chip_version);
1629 if ((chip_version & 0xfff) != 0x003) { 1577 if ((chip_version & 0xfff) != 0x003) {
1630 if (pcnet32_debug & NETIF_MSG_PROBE) 1578 if (pcnet32_debug & NETIF_MSG_PROBE)
1631 printk(KERN_INFO PFX "Unsupported chip version.\n"); 1579 pr_info("Unsupported chip version\n");
1632 goto err_release_region; 1580 goto err_release_region;
1633 } 1581 }
1634 1582
@@ -1681,7 +1629,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1681 if (cards_found < MAX_UNITS && homepna[cards_found]) 1629 if (cards_found < MAX_UNITS && homepna[cards_found])
1682 media |= 1; /* switch to home wiring mode */ 1630 media |= 1; /* switch to home wiring mode */
1683 if (pcnet32_debug & NETIF_MSG_PROBE) 1631 if (pcnet32_debug & NETIF_MSG_PROBE)
1684 printk(KERN_DEBUG PFX "media set to %sMbit mode.\n", 1632 printk(KERN_DEBUG PFX "media set to %sMbit mode\n",
1685 (media & 1) ? "1" : "10"); 1633 (media & 1) ? "1" : "10");
1686 a->write_bcr(ioaddr, 49, media); 1634 a->write_bcr(ioaddr, 49, media);
1687 break; 1635 break;
@@ -1697,9 +1645,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1697 break; 1645 break;
1698 default: 1646 default:
1699 if (pcnet32_debug & NETIF_MSG_PROBE) 1647 if (pcnet32_debug & NETIF_MSG_PROBE)
1700 printk(KERN_INFO PFX 1648 pr_info("PCnet version %#x, no PCnet32 chip\n",
1701 "PCnet version %#x, no PCnet32 chip.\n", 1649 chip_version);
1702 chip_version);
1703 goto err_release_region; 1650 goto err_release_region;
1704 } 1651 }
1705 1652
@@ -1721,7 +1668,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1721 dev = alloc_etherdev(sizeof(*lp)); 1668 dev = alloc_etherdev(sizeof(*lp));
1722 if (!dev) { 1669 if (!dev) {
1723 if (pcnet32_debug & NETIF_MSG_PROBE) 1670 if (pcnet32_debug & NETIF_MSG_PROBE)
1724 printk(KERN_ERR PFX "Memory allocation failed.\n"); 1671 pr_err("Memory allocation failed\n");
1725 ret = -ENOMEM; 1672 ret = -ENOMEM;
1726 goto err_release_region; 1673 goto err_release_region;
1727 } 1674 }
@@ -1730,7 +1677,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1730 SET_NETDEV_DEV(dev, &pdev->dev); 1677 SET_NETDEV_DEV(dev, &pdev->dev);
1731 1678
1732 if (pcnet32_debug & NETIF_MSG_PROBE) 1679 if (pcnet32_debug & NETIF_MSG_PROBE)
1733 printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr); 1680 pr_info("%s at %#3lx,", chipname, ioaddr);
1734 1681
1735 /* In most chips, after a chip reset, the ethernet address is read from the 1682 /* In most chips, after a chip reset, the ethernet address is read from the
1736 * station address PROM at the base address and programmed into the 1683 * station address PROM at the base address and programmed into the
@@ -1755,9 +1702,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1755 !is_valid_ether_addr(dev->dev_addr)) { 1702 !is_valid_ether_addr(dev->dev_addr)) {
1756 if (is_valid_ether_addr(promaddr)) { 1703 if (is_valid_ether_addr(promaddr)) {
1757 if (pcnet32_debug & NETIF_MSG_PROBE) { 1704 if (pcnet32_debug & NETIF_MSG_PROBE) {
1758 printk(" warning: CSR address invalid,\n"); 1705 pr_cont(" warning: CSR address invalid,\n");
1759 printk(KERN_INFO 1706 pr_info(" using instead PROM address of");
1760 " using instead PROM address of");
1761 } 1707 }
1762 memcpy(dev->dev_addr, promaddr, 6); 1708 memcpy(dev->dev_addr, promaddr, 6);
1763 } 1709 }
@@ -1769,54 +1715,54 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1769 memset(dev->dev_addr, 0, ETH_ALEN); 1715 memset(dev->dev_addr, 0, ETH_ALEN);
1770 1716
1771 if (pcnet32_debug & NETIF_MSG_PROBE) { 1717 if (pcnet32_debug & NETIF_MSG_PROBE) {
1772 printk(" %pM", dev->dev_addr); 1718 pr_cont(" %pM", dev->dev_addr);
1773 1719
1774 /* Version 0x2623 and 0x2624 */ 1720 /* Version 0x2623 and 0x2624 */
1775 if (((chip_version + 1) & 0xfffe) == 0x2624) { 1721 if (((chip_version + 1) & 0xfffe) == 0x2624) {
1776 i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */ 1722 i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
1777 printk(KERN_INFO " tx_start_pt(0x%04x):", i); 1723 pr_info(" tx_start_pt(0x%04x):", i);
1778 switch (i >> 10) { 1724 switch (i >> 10) {
1779 case 0: 1725 case 0:
1780 printk(KERN_CONT " 20 bytes,"); 1726 pr_cont(" 20 bytes,");
1781 break; 1727 break;
1782 case 1: 1728 case 1:
1783 printk(KERN_CONT " 64 bytes,"); 1729 pr_cont(" 64 bytes,");
1784 break; 1730 break;
1785 case 2: 1731 case 2:
1786 printk(KERN_CONT " 128 bytes,"); 1732 pr_cont(" 128 bytes,");
1787 break; 1733 break;
1788 case 3: 1734 case 3:
1789 printk(KERN_CONT "~220 bytes,"); 1735 pr_cont("~220 bytes,");
1790 break; 1736 break;
1791 } 1737 }
1792 i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */ 1738 i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
1793 printk(KERN_CONT " BCR18(%x):", i & 0xffff); 1739 pr_cont(" BCR18(%x):", i & 0xffff);
1794 if (i & (1 << 5)) 1740 if (i & (1 << 5))
1795 printk(KERN_CONT "BurstWrEn "); 1741 pr_cont("BurstWrEn ");
1796 if (i & (1 << 6)) 1742 if (i & (1 << 6))
1797 printk(KERN_CONT "BurstRdEn "); 1743 pr_cont("BurstRdEn ");
1798 if (i & (1 << 7)) 1744 if (i & (1 << 7))
1799 printk(KERN_CONT "DWordIO "); 1745 pr_cont("DWordIO ");
1800 if (i & (1 << 11)) 1746 if (i & (1 << 11))
1801 printk(KERN_CONT "NoUFlow "); 1747 pr_cont("NoUFlow ");
1802 i = a->read_bcr(ioaddr, 25); 1748 i = a->read_bcr(ioaddr, 25);
1803 printk(KERN_INFO " SRAMSIZE=0x%04x,", i << 8); 1749 pr_info(" SRAMSIZE=0x%04x,", i << 8);
1804 i = a->read_bcr(ioaddr, 26); 1750 i = a->read_bcr(ioaddr, 26);
1805 printk(KERN_CONT " SRAM_BND=0x%04x,", i << 8); 1751 pr_cont(" SRAM_BND=0x%04x,", i << 8);
1806 i = a->read_bcr(ioaddr, 27); 1752 i = a->read_bcr(ioaddr, 27);
1807 if (i & (1 << 14)) 1753 if (i & (1 << 14))
1808 printk(KERN_CONT "LowLatRx"); 1754 pr_cont("LowLatRx");
1809 } 1755 }
1810 } 1756 }
1811 1757
1812 dev->base_addr = ioaddr; 1758 dev->base_addr = ioaddr;
1813 lp = netdev_priv(dev); 1759 lp = netdev_priv(dev);
1814 /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */ 1760 /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
1815 if ((lp->init_block = 1761 lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block),
1816 pci_alloc_consistent(pdev, sizeof(*lp->init_block), &lp->init_dma_addr)) == NULL) { 1762 &lp->init_dma_addr);
1763 if (!lp->init_block) {
1817 if (pcnet32_debug & NETIF_MSG_PROBE) 1764 if (pcnet32_debug & NETIF_MSG_PROBE)
1818 printk(KERN_ERR PFX 1765 pr_err("Consistent memory allocation failed\n");
1819 "Consistent memory allocation failed.\n");
1820 ret = -ENOMEM; 1766 ret = -ENOMEM;
1821 goto err_free_netdev; 1767 goto err_free_netdev;
1822 } 1768 }
@@ -1890,7 +1836,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1890 if (pdev) { /* use the IRQ provided by PCI */ 1836 if (pdev) { /* use the IRQ provided by PCI */
1891 dev->irq = pdev->irq; 1837 dev->irq = pdev->irq;
1892 if (pcnet32_debug & NETIF_MSG_PROBE) 1838 if (pcnet32_debug & NETIF_MSG_PROBE)
1893 printk(" assigned IRQ %d.\n", dev->irq); 1839 pr_cont(" assigned IRQ %d\n", dev->irq);
1894 } else { 1840 } else {
1895 unsigned long irq_mask = probe_irq_on(); 1841 unsigned long irq_mask = probe_irq_on();
1896 1842
@@ -1906,12 +1852,12 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1906 dev->irq = probe_irq_off(irq_mask); 1852 dev->irq = probe_irq_off(irq_mask);
1907 if (!dev->irq) { 1853 if (!dev->irq) {
1908 if (pcnet32_debug & NETIF_MSG_PROBE) 1854 if (pcnet32_debug & NETIF_MSG_PROBE)
1909 printk(", failed to detect IRQ line.\n"); 1855 pr_cont(", failed to detect IRQ line\n");
1910 ret = -ENODEV; 1856 ret = -ENODEV;
1911 goto err_free_ring; 1857 goto err_free_ring;
1912 } 1858 }
1913 if (pcnet32_debug & NETIF_MSG_PROBE) 1859 if (pcnet32_debug & NETIF_MSG_PROBE)
1914 printk(", probed IRQ %d.\n", dev->irq); 1860 pr_cont(", probed IRQ %d\n", dev->irq);
1915 } 1861 }
1916 1862
1917 /* Set the mii phy_id so that we can query the link state */ 1863 /* Set the mii phy_id so that we can query the link state */
@@ -1935,14 +1881,12 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1935 lp->phymask |= (1 << i); 1881 lp->phymask |= (1 << i);
1936 lp->mii_if.phy_id = i; 1882 lp->mii_if.phy_id = i;
1937 if (pcnet32_debug & NETIF_MSG_PROBE) 1883 if (pcnet32_debug & NETIF_MSG_PROBE)
1938 printk(KERN_INFO PFX 1884 pr_info("Found PHY %04x:%04x at address %d\n",
1939 "Found PHY %04x:%04x at address %d.\n", 1885 id1, id2, i);
1940 id1, id2, i);
1941 } 1886 }
1942 lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5); 1887 lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
1943 if (lp->phycount > 1) { 1888 if (lp->phycount > 1)
1944 lp->options |= PCNET32_PORT_MII; 1889 lp->options |= PCNET32_PORT_MII;
1945 }
1946 } 1890 }
1947 1891
1948 init_timer(&lp->watchdog_timer); 1892 init_timer(&lp->watchdog_timer);
@@ -1966,7 +1910,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1966 } 1910 }
1967 1911
1968 if (pcnet32_debug & NETIF_MSG_PROBE) 1912 if (pcnet32_debug & NETIF_MSG_PROBE)
1969 printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name); 1913 pr_info("%s: registered as %s\n", dev->name, lp->name);
1970 cards_found++; 1914 cards_found++;
1971 1915
1972 /* enable LED writes */ 1916 /* enable LED writes */
@@ -1995,10 +1939,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
1995 lp->tx_ring_size, 1939 lp->tx_ring_size,
1996 &lp->tx_ring_dma_addr); 1940 &lp->tx_ring_dma_addr);
1997 if (lp->tx_ring == NULL) { 1941 if (lp->tx_ring == NULL) {
1998 if (netif_msg_drv(lp)) 1942 netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
1999 printk(KERN_ERR PFX
2000 "%s: Consistent memory allocation failed.\n",
2001 name);
2002 return -ENOMEM; 1943 return -ENOMEM;
2003 } 1944 }
2004 1945
@@ -2007,46 +1948,35 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
2007 lp->rx_ring_size, 1948 lp->rx_ring_size,
2008 &lp->rx_ring_dma_addr); 1949 &lp->rx_ring_dma_addr);
2009 if (lp->rx_ring == NULL) { 1950 if (lp->rx_ring == NULL) {
2010 if (netif_msg_drv(lp)) 1951 netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
2011 printk(KERN_ERR PFX
2012 "%s: Consistent memory allocation failed.\n",
2013 name);
2014 return -ENOMEM; 1952 return -ENOMEM;
2015 } 1953 }
2016 1954
2017 lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t), 1955 lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
2018 GFP_ATOMIC); 1956 GFP_ATOMIC);
2019 if (!lp->tx_dma_addr) { 1957 if (!lp->tx_dma_addr) {
2020 if (netif_msg_drv(lp)) 1958 netif_err(lp, drv, dev, "Memory allocation failed\n");
2021 printk(KERN_ERR PFX
2022 "%s: Memory allocation failed.\n", name);
2023 return -ENOMEM; 1959 return -ENOMEM;
2024 } 1960 }
2025 1961
2026 lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t), 1962 lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
2027 GFP_ATOMIC); 1963 GFP_ATOMIC);
2028 if (!lp->rx_dma_addr) { 1964 if (!lp->rx_dma_addr) {
2029 if (netif_msg_drv(lp)) 1965 netif_err(lp, drv, dev, "Memory allocation failed\n");
2030 printk(KERN_ERR PFX
2031 "%s: Memory allocation failed.\n", name);
2032 return -ENOMEM; 1966 return -ENOMEM;
2033 } 1967 }
2034 1968
2035 lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *), 1969 lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
2036 GFP_ATOMIC); 1970 GFP_ATOMIC);
2037 if (!lp->tx_skbuff) { 1971 if (!lp->tx_skbuff) {
2038 if (netif_msg_drv(lp)) 1972 netif_err(lp, drv, dev, "Memory allocation failed\n");
2039 printk(KERN_ERR PFX
2040 "%s: Memory allocation failed.\n", name);
2041 return -ENOMEM; 1973 return -ENOMEM;
2042 } 1974 }
2043 1975
2044 lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *), 1976 lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
2045 GFP_ATOMIC); 1977 GFP_ATOMIC);
2046 if (!lp->rx_skbuff) { 1978 if (!lp->rx_skbuff) {
2047 if (netif_msg_drv(lp)) 1979 netif_err(lp, drv, dev, "Memory allocation failed\n");
2048 printk(KERN_ERR PFX
2049 "%s: Memory allocation failed.\n", name);
2050 return -ENOMEM; 1980 return -ENOMEM;
2051 } 1981 }
2052 1982
@@ -2115,12 +2045,11 @@ static int pcnet32_open(struct net_device *dev)
2115 /* switch pcnet32 to 32bit mode */ 2045 /* switch pcnet32 to 32bit mode */
2116 lp->a.write_bcr(ioaddr, 20, 2); 2046 lp->a.write_bcr(ioaddr, 20, 2);
2117 2047
2118 if (netif_msg_ifup(lp)) 2048 netif_printk(lp, ifup, KERN_DEBUG, dev,
2119 printk(KERN_DEBUG 2049 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
2120 "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n", 2050 __func__, dev->irq, (u32) (lp->tx_ring_dma_addr),
2121 dev->name, dev->irq, (u32) (lp->tx_ring_dma_addr), 2051 (u32) (lp->rx_ring_dma_addr),
2122 (u32) (lp->rx_ring_dma_addr), 2052 (u32) (lp->init_dma_addr));
2123 (u32) (lp->init_dma_addr));
2124 2053
2125 /* set/reset autoselect bit */ 2054 /* set/reset autoselect bit */
2126 val = lp->a.read_bcr(ioaddr, 2) & ~2; 2055 val = lp->a.read_bcr(ioaddr, 2) & ~2;
@@ -2155,10 +2084,8 @@ static int pcnet32_open(struct net_device *dev)
2155 pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { 2084 pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
2156 if (lp->options & PCNET32_PORT_ASEL) { 2085 if (lp->options & PCNET32_PORT_ASEL) {
2157 lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; 2086 lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
2158 if (netif_msg_link(lp)) 2087 netif_printk(lp, link, KERN_DEBUG, dev,
2159 printk(KERN_DEBUG 2088 "Setting 100Mb-Full Duplex\n");
2160 "%s: Setting 100Mb-Full Duplex.\n",
2161 dev->name);
2162 } 2089 }
2163 } 2090 }
2164 if (lp->phycount < 2) { 2091 if (lp->phycount < 2) {
@@ -2246,9 +2173,7 @@ static int pcnet32_open(struct net_device *dev)
2246 } 2173 }
2247 } 2174 }
2248 lp->mii_if.phy_id = first_phy; 2175 lp->mii_if.phy_id = first_phy;
2249 if (netif_msg_link(lp)) 2176 netif_info(lp, link, dev, "Using PHY number %d\n", first_phy);
2250 printk(KERN_INFO "%s: Using PHY number %d.\n",
2251 dev->name, first_phy);
2252 } 2177 }
2253 2178
2254#ifdef DO_DXSUFLO 2179#ifdef DO_DXSUFLO
@@ -2295,18 +2220,17 @@ static int pcnet32_open(struct net_device *dev)
2295 */ 2220 */
2296 lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL); 2221 lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
2297 2222
2298 if (netif_msg_ifup(lp)) 2223 netif_printk(lp, ifup, KERN_DEBUG, dev,
2299 printk(KERN_DEBUG 2224 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
2300 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n", 2225 i,
2301 dev->name, i, 2226 (u32) (lp->init_dma_addr),
2302 (u32) (lp->init_dma_addr), 2227 lp->a.read_csr(ioaddr, CSR0));
2303 lp->a.read_csr(ioaddr, CSR0));
2304 2228
2305 spin_unlock_irqrestore(&lp->lock, flags); 2229 spin_unlock_irqrestore(&lp->lock, flags);
2306 2230
2307 return 0; /* Always succeed */ 2231 return 0; /* Always succeed */
2308 2232
2309 err_free_ring: 2233err_free_ring:
2310 /* free any allocated skbuffs */ 2234 /* free any allocated skbuffs */
2311 pcnet32_purge_rx_ring(dev); 2235 pcnet32_purge_rx_ring(dev);
2312 2236
@@ -2316,7 +2240,7 @@ static int pcnet32_open(struct net_device *dev)
2316 */ 2240 */
2317 lp->a.write_bcr(ioaddr, 20, 4); 2241 lp->a.write_bcr(ioaddr, 20, 4);
2318 2242
2319 err_free_irq: 2243err_free_irq:
2320 spin_unlock_irqrestore(&lp->lock, flags); 2244 spin_unlock_irqrestore(&lp->lock, flags);
2321 free_irq(dev->irq, dev); 2245 free_irq(dev->irq, dev);
2322 return rc; 2246 return rc;
@@ -2367,14 +2291,12 @@ static int pcnet32_init_ring(struct net_device *dev)
2367 for (i = 0; i < lp->rx_ring_size; i++) { 2291 for (i = 0; i < lp->rx_ring_size; i++) {
2368 struct sk_buff *rx_skbuff = lp->rx_skbuff[i]; 2292 struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
2369 if (rx_skbuff == NULL) { 2293 if (rx_skbuff == NULL) {
2370 if (! 2294 lp->rx_skbuff[i] = dev_alloc_skb(PKT_BUF_SKB);
2371 (rx_skbuff = lp->rx_skbuff[i] = 2295 rx_skbuff = lp->rx_skbuff[i];
2372 dev_alloc_skb(PKT_BUF_SKB))) { 2296 if (!rx_skbuff) {
2373 /* there is not much, we can do at this point */ 2297 /* there is not much we can do at this point */
2374 if (netif_msg_drv(lp)) 2298 netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n",
2375 printk(KERN_ERR 2299 __func__);
2376 "%s: pcnet32_init_ring dev_alloc_skb failed.\n",
2377 dev->name);
2378 return -1; 2300 return -1;
2379 } 2301 }
2380 skb_reserve(rx_skbuff, NET_IP_ALIGN); 2302 skb_reserve(rx_skbuff, NET_IP_ALIGN);
@@ -2424,10 +2346,9 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
2424 if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP) 2346 if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
2425 break; 2347 break;
2426 2348
2427 if (i >= 100 && netif_msg_drv(lp)) 2349 if (i >= 100)
2428 printk(KERN_ERR 2350 netif_err(lp, drv, dev, "%s timed out waiting for stop\n",
2429 "%s: pcnet32_restart timed out waiting for stop.\n", 2351 __func__);
2430 dev->name);
2431 2352
2432 pcnet32_purge_tx_ring(dev); 2353 pcnet32_purge_tx_ring(dev);
2433 if (pcnet32_init_ring(dev)) 2354 if (pcnet32_init_ring(dev))
@@ -2451,8 +2372,7 @@ static void pcnet32_tx_timeout(struct net_device *dev)
2451 spin_lock_irqsave(&lp->lock, flags); 2372 spin_lock_irqsave(&lp->lock, flags);
2452 /* Transmitter timeout, serious problems. */ 2373 /* Transmitter timeout, serious problems. */
2453 if (pcnet32_debug & NETIF_MSG_DRV) 2374 if (pcnet32_debug & NETIF_MSG_DRV)
2454 printk(KERN_ERR 2375 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
2455 "%s: transmit timed out, status %4.4x, resetting.\n",
2456 dev->name, lp->a.read_csr(ioaddr, CSR0)); 2376 dev->name, lp->a.read_csr(ioaddr, CSR0));
2457 lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); 2377 lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
2458 dev->stats.tx_errors++; 2378 dev->stats.tx_errors++;
@@ -2495,11 +2415,9 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
2495 2415
2496 spin_lock_irqsave(&lp->lock, flags); 2416 spin_lock_irqsave(&lp->lock, flags);
2497 2417
2498 if (netif_msg_tx_queued(lp)) { 2418 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
2499 printk(KERN_DEBUG 2419 "%s() called, csr0 %4.4x\n",
2500 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n", 2420 __func__, lp->a.read_csr(ioaddr, CSR0));
2501 dev->name, lp->a.read_csr(ioaddr, CSR0));
2502 }
2503 2421
2504 /* Default status -- will not enable Successful-TxDone 2422 /* Default status -- will not enable Successful-TxDone
2505 * interrupt when that option is available to us. 2423 * interrupt when that option is available to us.
@@ -2558,16 +2476,14 @@ pcnet32_interrupt(int irq, void *dev_id)
2558 2476
2559 csr0 = lp->a.read_csr(ioaddr, CSR0); 2477 csr0 = lp->a.read_csr(ioaddr, CSR0);
2560 while ((csr0 & 0x8f00) && --boguscnt >= 0) { 2478 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
2561 if (csr0 == 0xffff) { 2479 if (csr0 == 0xffff)
2562 break; /* PCMCIA remove happened */ 2480 break; /* PCMCIA remove happened */
2563 }
2564 /* Acknowledge all of the current interrupt sources ASAP. */ 2481 /* Acknowledge all of the current interrupt sources ASAP. */
2565 lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f); 2482 lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
2566 2483
2567 if (netif_msg_intr(lp)) 2484 netif_printk(lp, intr, KERN_DEBUG, dev,
2568 printk(KERN_DEBUG 2485 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
2569 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n", 2486 csr0, lp->a.read_csr(ioaddr, CSR0));
2570 dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
2571 2487
2572 /* Log misc errors. */ 2488 /* Log misc errors. */
2573 if (csr0 & 0x4000) 2489 if (csr0 & 0x4000)
@@ -2587,10 +2503,8 @@ pcnet32_interrupt(int irq, void *dev_id)
2587 dev->stats.rx_errors++; /* Missed a Rx frame. */ 2503 dev->stats.rx_errors++; /* Missed a Rx frame. */
2588 } 2504 }
2589 if (csr0 & 0x0800) { 2505 if (csr0 & 0x0800) {
2590 if (netif_msg_drv(lp)) 2506 netif_err(lp, drv, dev, "Bus master arbitration failure, status %4.4x\n",
2591 printk(KERN_ERR 2507 csr0);
2592 "%s: Bus master arbitration failure, status %4.4x.\n",
2593 dev->name, csr0);
2594 /* unlike for the lance, there is no restart needed */ 2508 /* unlike for the lance, there is no restart needed */
2595 } 2509 }
2596 if (napi_schedule_prep(&lp->napi)) { 2510 if (napi_schedule_prep(&lp->napi)) {
@@ -2606,9 +2520,9 @@ pcnet32_interrupt(int irq, void *dev_id)
2606 csr0 = lp->a.read_csr(ioaddr, CSR0); 2520 csr0 = lp->a.read_csr(ioaddr, CSR0);
2607 } 2521 }
2608 2522
2609 if (netif_msg_intr(lp)) 2523 netif_printk(lp, intr, KERN_DEBUG, dev,
2610 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n", 2524 "exiting interrupt, csr0=%#4.4x\n",
2611 dev->name, lp->a.read_csr(ioaddr, CSR0)); 2525 lp->a.read_csr(ioaddr, CSR0));
2612 2526
2613 spin_unlock(&lp->lock); 2527 spin_unlock(&lp->lock);
2614 2528
@@ -2630,10 +2544,9 @@ static int pcnet32_close(struct net_device *dev)
2630 2544
2631 dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112); 2545 dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
2632 2546
2633 if (netif_msg_ifdown(lp)) 2547 netif_printk(lp, ifdown, KERN_DEBUG, dev,
2634 printk(KERN_DEBUG 2548 "Shutting down ethercard, status was %2.2x\n",
2635 "%s: Shutting down ethercard, status was %2.2x.\n", 2549 lp->a.read_csr(ioaddr, CSR0));
2636 dev->name, lp->a.read_csr(ioaddr, CSR0));
2637 2550
2638 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */ 2551 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
2639 lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); 2552 lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
@@ -2677,7 +2590,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
2677 struct pcnet32_private *lp = netdev_priv(dev); 2590 struct pcnet32_private *lp = netdev_priv(dev);
2678 volatile struct pcnet32_init_block *ib = lp->init_block; 2591 volatile struct pcnet32_init_block *ib = lp->init_block;
2679 volatile __le16 *mcast_table = (__le16 *)ib->filter; 2592 volatile __le16 *mcast_table = (__le16 *)ib->filter;
2680 struct dev_mc_list *dmi = dev->mc_list; 2593 struct dev_mc_list *dmi;
2681 unsigned long ioaddr = dev->base_addr; 2594 unsigned long ioaddr = dev->base_addr;
2682 char *addrs; 2595 char *addrs;
2683 int i; 2596 int i;
@@ -2698,9 +2611,8 @@ static void pcnet32_load_multicast(struct net_device *dev)
2698 ib->filter[1] = 0; 2611 ib->filter[1] = 0;
2699 2612
2700 /* Add addresses */ 2613 /* Add addresses */
2701 for (i = 0; i < dev->mc_count; i++) { 2614 netdev_for_each_mc_addr(dmi, dev) {
2702 addrs = dmi->dmi_addr; 2615 addrs = dmi->dmi_addr;
2703 dmi = dmi->next;
2704 2616
2705 /* multicast address? */ 2617 /* multicast address? */
2706 if (!(*addrs & 1)) 2618 if (!(*addrs & 1))
@@ -2730,9 +2642,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
2730 csr15 = lp->a.read_csr(ioaddr, CSR15); 2642 csr15 = lp->a.read_csr(ioaddr, CSR15);
2731 if (dev->flags & IFF_PROMISC) { 2643 if (dev->flags & IFF_PROMISC) {
2732 /* Log any net taps. */ 2644 /* Log any net taps. */
2733 if (netif_msg_hw(lp)) 2645 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
2734 printk(KERN_INFO "%s: Promiscuous mode enabled.\n",
2735 dev->name);
2736 lp->init_block->mode = 2646 lp->init_block->mode =
2737 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << 2647 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
2738 7); 2648 7);
@@ -2819,10 +2729,8 @@ static int pcnet32_check_otherphy(struct net_device *dev)
2819 mii.phy_id = i; 2729 mii.phy_id = i;
2820 if (mii_link_ok(&mii)) { 2730 if (mii_link_ok(&mii)) {
2821 /* found PHY with active link */ 2731 /* found PHY with active link */
2822 if (netif_msg_link(lp)) 2732 netif_info(lp, link, dev, "Using PHY number %d\n",
2823 printk(KERN_INFO 2733 i);
2824 "%s: Using PHY number %d.\n",
2825 dev->name, i);
2826 2734
2827 /* isolate inactive phy */ 2735 /* isolate inactive phy */
2828 bmcr = 2736 bmcr =
@@ -2868,8 +2776,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
2868 if (!curr_link) { 2776 if (!curr_link) {
2869 if (prev_link || verbose) { 2777 if (prev_link || verbose) {
2870 netif_carrier_off(dev); 2778 netif_carrier_off(dev);
2871 if (netif_msg_link(lp)) 2779 netif_info(lp, link, dev, "link down\n");
2872 printk(KERN_INFO "%s: link down\n", dev->name);
2873 } 2780 }
2874 if (lp->phycount > 1) { 2781 if (lp->phycount > 1) {
2875 curr_link = pcnet32_check_otherphy(dev); 2782 curr_link = pcnet32_check_otherphy(dev);
@@ -2881,12 +2788,11 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
2881 if (netif_msg_link(lp)) { 2788 if (netif_msg_link(lp)) {
2882 struct ethtool_cmd ecmd; 2789 struct ethtool_cmd ecmd;
2883 mii_ethtool_gset(&lp->mii_if, &ecmd); 2790 mii_ethtool_gset(&lp->mii_if, &ecmd);
2884 printk(KERN_INFO 2791 netdev_info(dev, "link up, %sMbps, %s-duplex\n",
2885 "%s: link up, %sMbps, %s-duplex\n", 2792 (ecmd.speed == SPEED_100)
2886 dev->name, 2793 ? "100" : "10",
2887 (ecmd.speed == SPEED_100) ? "100" : "10", 2794 (ecmd.duplex == DUPLEX_FULL)
2888 (ecmd.duplex == 2795 ? "full" : "half");
2889 DUPLEX_FULL) ? "full" : "half");
2890 } 2796 }
2891 bcr9 = lp->a.read_bcr(dev->base_addr, 9); 2797 bcr9 = lp->a.read_bcr(dev->base_addr, 9);
2892 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) { 2798 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
@@ -2897,8 +2803,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
2897 lp->a.write_bcr(dev->base_addr, 9, bcr9); 2803 lp->a.write_bcr(dev->base_addr, 9, bcr9);
2898 } 2804 }
2899 } else { 2805 } else {
2900 if (netif_msg_link(lp)) 2806 netif_info(lp, link, dev, "link up\n");
2901 printk(KERN_INFO "%s: link up\n", dev->name);
2902 } 2807 }
2903 } 2808 }
2904} 2809}
@@ -3010,7 +2915,7 @@ MODULE_LICENSE("GPL");
3010 2915
3011static int __init pcnet32_init_module(void) 2916static int __init pcnet32_init_module(void)
3012{ 2917{
3013 printk(KERN_INFO "%s", version); 2918 pr_info("%s", version);
3014 2919
3015 pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT); 2920 pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
3016 2921
@@ -3026,7 +2931,7 @@ static int __init pcnet32_init_module(void)
3026 pcnet32_probe_vlbus(pcnet32_portlist); 2931 pcnet32_probe_vlbus(pcnet32_portlist);
3027 2932
3028 if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE)) 2933 if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
3029 printk(KERN_INFO PFX "%d cards_found.\n", cards_found); 2934 pr_info("%d cards_found\n", cards_found);
3030 2935
3031 return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV; 2936 return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
3032} 2937}
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 33c4b12a63ba..f482fc4f8cf1 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -18,9 +18,6 @@
18#include <linux/phy.h> 18#include <linux/phy.h>
19#include <linux/brcmphy.h> 19#include <linux/brcmphy.h>
20 20
21#define PHY_ID_BCM50610 0x0143bd60
22#define PHY_ID_BCM50610M 0x0143bd70
23#define PHY_ID_BCM57780 0x03625d90
24 21
25#define BRCM_PHY_MODEL(phydev) \ 22#define BRCM_PHY_MODEL(phydev) \
26 ((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask) 23 ((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask)
@@ -823,7 +820,7 @@ static struct phy_driver bcm57780_driver = {
823}; 820};
824 821
825static struct phy_driver bcmac131_driver = { 822static struct phy_driver bcmac131_driver = {
826 .phy_id = 0x0143bc70, 823 .phy_id = PHY_ID_BCMAC131,
827 .phy_id_mask = 0xfffffff0, 824 .phy_id_mask = 0xfffffff0,
828 .name = "Broadcom BCMAC131", 825 .name = "Broadcom BCMAC131",
829 .features = PHY_BASIC_FEATURES | 826 .features = PHY_BASIC_FEATURES |
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 6f69b9ba0df8..65ed385c2ceb 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -63,6 +63,7 @@
63#define MII_M1111_HWCFG_MODE_COPPER_RGMII 0xb 63#define MII_M1111_HWCFG_MODE_COPPER_RGMII 0xb
64#define MII_M1111_HWCFG_MODE_FIBER_RGMII 0x3 64#define MII_M1111_HWCFG_MODE_FIBER_RGMII 0x3
65#define MII_M1111_HWCFG_MODE_SGMII_NO_CLK 0x4 65#define MII_M1111_HWCFG_MODE_SGMII_NO_CLK 0x4
66#define MII_M1111_HWCFG_MODE_COPPER_RTBI 0x9
66#define MII_M1111_HWCFG_FIBER_COPPER_AUTO 0x8000 67#define MII_M1111_HWCFG_FIBER_COPPER_AUTO 0x8000
67#define MII_M1111_HWCFG_FIBER_COPPER_RES 0x2000 68#define MII_M1111_HWCFG_FIBER_COPPER_RES 0x2000
68 69
@@ -269,6 +270,43 @@ static int m88e1111_config_init(struct phy_device *phydev)
269 return err; 270 return err;
270 } 271 }
271 272
273 if (phydev->interface == PHY_INTERFACE_MODE_RTBI) {
274 temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
275 if (temp < 0)
276 return temp;
277 temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY);
278 err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp);
279 if (err < 0)
280 return err;
281
282 temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
283 if (temp < 0)
284 return temp;
285 temp &= ~(MII_M1111_HWCFG_MODE_MASK | MII_M1111_HWCFG_FIBER_COPPER_RES);
286 temp |= 0x7 | MII_M1111_HWCFG_FIBER_COPPER_AUTO;
287 err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
288 if (err < 0)
289 return err;
290
291 /* soft reset */
292 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
293 if (err < 0)
294 return err;
295 do
296 temp = phy_read(phydev, MII_BMCR);
297 while (temp & BMCR_RESET);
298
299 temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
300 if (temp < 0)
301 return temp;
302 temp &= ~(MII_M1111_HWCFG_MODE_MASK | MII_M1111_HWCFG_FIBER_COPPER_RES);
303 temp |= MII_M1111_HWCFG_MODE_COPPER_RTBI | MII_M1111_HWCFG_FIBER_COPPER_AUTO;
304 err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
305 if (err < 0)
306 return err;
307 }
308
309
272 err = phy_write(phydev, MII_BMCR, BMCR_RESET); 310 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
273 if (err < 0) 311 if (err < 0)
274 return err; 312 return err;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index adbc0fded130..db1794546c56 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -277,6 +277,22 @@ int phy_device_register(struct phy_device *phydev)
277EXPORT_SYMBOL(phy_device_register); 277EXPORT_SYMBOL(phy_device_register);
278 278
279/** 279/**
280 * phy_find_first - finds the first PHY device on the bus
281 * @bus: the target MII bus
282 */
283struct phy_device *phy_find_first(struct mii_bus *bus)
284{
285 int addr;
286
287 for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
288 if (bus->phy_map[addr])
289 return bus->phy_map[addr];
290 }
291 return NULL;
292}
293EXPORT_SYMBOL(phy_find_first);
294
295/**
280 * phy_prepare_link - prepares the PHY layer to monitor link status 296 * phy_prepare_link - prepares the PHY layer to monitor link status
281 * @phydev: target phy_device struct 297 * @phydev: target phy_device struct
282 * @handler: callback function for link status change notifications 298 * @handler: callback function for link status change notifications
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 5123bb954dd7..ed2644a57500 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -25,6 +25,7 @@
25 25
26#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */ 26#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
27#define MII_LAN83C185_IM 30 /* Interrupt Mask */ 27#define MII_LAN83C185_IM 30 /* Interrupt Mask */
28#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */
28 29
29#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */ 30#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
30#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */ 31#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
@@ -37,8 +38,10 @@
37#define MII_LAN83C185_ISF_INT_ALL (0x0e) 38#define MII_LAN83C185_ISF_INT_ALL (0x0e)
38 39
39#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \ 40#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
40 (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4) 41 (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \
42 MII_LAN83C185_ISF_INT7)
41 43
44#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */
42 45
43static int smsc_phy_config_intr(struct phy_device *phydev) 46static int smsc_phy_config_intr(struct phy_device *phydev)
44{ 47{
@@ -59,9 +62,23 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
59 62
60static int smsc_phy_config_init(struct phy_device *phydev) 63static int smsc_phy_config_init(struct phy_device *phydev)
61{ 64{
65 int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
66 if (rc < 0)
67 return rc;
68
69 /* Enable energy detect mode for this SMSC Transceivers */
70 rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
71 rc | MII_LAN83C185_EDPWRDOWN);
72 if (rc < 0)
73 return rc;
74
62 return smsc_phy_ack_interrupt (phydev); 75 return smsc_phy_ack_interrupt (phydev);
63} 76}
64 77
78static int lan911x_config_init(struct phy_device *phydev)
79{
80 return smsc_phy_ack_interrupt(phydev);
81}
65 82
66static struct phy_driver lan83c185_driver = { 83static struct phy_driver lan83c185_driver = {
67 .phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */ 84 .phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
@@ -147,7 +164,7 @@ static struct phy_driver lan911x_int_driver = {
147 /* basic functions */ 164 /* basic functions */
148 .config_aneg = genphy_config_aneg, 165 .config_aneg = genphy_config_aneg,
149 .read_status = genphy_read_status, 166 .read_status = genphy_read_status,
150 .config_init = smsc_phy_config_init, 167 .config_init = lan911x_config_init,
151 168
152 /* IRQ related */ 169 /* IRQ related */
153 .ack_interrupt = smsc_phy_ack_interrupt, 170 .ack_interrupt = smsc_phy_ack_interrupt,
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 2282e729edbe..6d61602208c1 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -167,7 +167,7 @@ struct channel {
167 u8 avail; /* flag used in multilink stuff */ 167 u8 avail; /* flag used in multilink stuff */
168 u8 had_frag; /* >= 1 fragments have been sent */ 168 u8 had_frag; /* >= 1 fragments have been sent */
169 u32 lastseq; /* MP: last sequence # received */ 169 u32 lastseq; /* MP: last sequence # received */
170 int speed; /* speed of the corresponding ppp channel*/ 170 int speed; /* speed of the corresponding ppp channel*/
171#endif /* CONFIG_PPP_MULTILINK */ 171#endif /* CONFIG_PPP_MULTILINK */
172}; 172};
173 173
@@ -1293,13 +1293,13 @@ ppp_push(struct ppp *ppp)
1293 */ 1293 */
1294static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) 1294static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1295{ 1295{
1296 int len, totlen; 1296 int len, totlen;
1297 int i, bits, hdrlen, mtu; 1297 int i, bits, hdrlen, mtu;
1298 int flen; 1298 int flen;
1299 int navail, nfree, nzero; 1299 int navail, nfree, nzero;
1300 int nbigger; 1300 int nbigger;
1301 int totspeed; 1301 int totspeed;
1302 int totfree; 1302 int totfree;
1303 unsigned char *p, *q; 1303 unsigned char *p, *q;
1304 struct list_head *list; 1304 struct list_head *list;
1305 struct channel *pch; 1305 struct channel *pch;
@@ -1307,21 +1307,21 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1307 struct ppp_channel *chan; 1307 struct ppp_channel *chan;
1308 1308
1309 totspeed = 0; /*total bitrate of the bundle*/ 1309 totspeed = 0; /*total bitrate of the bundle*/
1310 nfree = 0; /* # channels which have no packet already queued */ 1310 nfree = 0; /* # channels which have no packet already queued */
1311 navail = 0; /* total # of usable channels (not deregistered) */ 1311 navail = 0; /* total # of usable channels (not deregistered) */
1312 nzero = 0; /* number of channels with zero speed associated*/ 1312 nzero = 0; /* number of channels with zero speed associated*/
1313 totfree = 0; /*total # of channels available and 1313 totfree = 0; /*total # of channels available and
1314 *having no queued packets before 1314 *having no queued packets before
1315 *starting the fragmentation*/ 1315 *starting the fragmentation*/
1316 1316
1317 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; 1317 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
1318 i = 0; 1318 i = 0;
1319 list_for_each_entry(pch, &ppp->channels, clist) { 1319 list_for_each_entry(pch, &ppp->channels, clist) {
1320 navail += pch->avail = (pch->chan != NULL); 1320 navail += pch->avail = (pch->chan != NULL);
1321 pch->speed = pch->chan->speed; 1321 pch->speed = pch->chan->speed;
1322 if (pch->avail) { 1322 if (pch->avail) {
1323 if (skb_queue_empty(&pch->file.xq) || 1323 if (skb_queue_empty(&pch->file.xq) ||
1324 !pch->had_frag) { 1324 !pch->had_frag) {
1325 if (pch->speed == 0) 1325 if (pch->speed == 0)
1326 nzero++; 1326 nzero++;
1327 else 1327 else
@@ -1331,60 +1331,60 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1331 ++nfree; 1331 ++nfree;
1332 ++totfree; 1332 ++totfree;
1333 } 1333 }
1334 if (!pch->had_frag && i < ppp->nxchan) 1334 if (!pch->had_frag && i < ppp->nxchan)
1335 ppp->nxchan = i; 1335 ppp->nxchan = i;
1336 } 1336 }
1337 ++i; 1337 ++i;
1338 } 1338 }
1339 /* 1339 /*
1340 * Don't start sending this packet unless at least half of 1340 * Don't start sending this packet unless at least half of
1341 * the channels are free. This gives much better TCP 1341 * the channels are free. This gives much better TCP
1342 * performance if we have a lot of channels. 1342 * performance if we have a lot of channels.
1343 */ 1343 */
1344 if (nfree == 0 || nfree < navail / 2) 1344 if (nfree == 0 || nfree < navail / 2)
1345 return 0; /* can't take now, leave it in xmit_pending */ 1345 return 0; /* can't take now, leave it in xmit_pending */
1346 1346
1347 /* Do protocol field compression (XXX this should be optional) */ 1347 /* Do protocol field compression (XXX this should be optional) */
1348 p = skb->data; 1348 p = skb->data;
1349 len = skb->len; 1349 len = skb->len;
1350 if (*p == 0) { 1350 if (*p == 0) {
1351 ++p; 1351 ++p;
1352 --len; 1352 --len;
1353 } 1353 }
1354 1354
1355 totlen = len; 1355 totlen = len;
1356 nbigger = len % nfree; 1356 nbigger = len % nfree;
1357 1357
1358 /* skip to the channel after the one we last used 1358 /* skip to the channel after the one we last used
1359 and start at that one */ 1359 and start at that one */
1360 list = &ppp->channels; 1360 list = &ppp->channels;
1361 for (i = 0; i < ppp->nxchan; ++i) { 1361 for (i = 0; i < ppp->nxchan; ++i) {
1362 list = list->next; 1362 list = list->next;
1363 if (list == &ppp->channels) { 1363 if (list == &ppp->channels) {
1364 i = 0; 1364 i = 0;
1365 break; 1365 break;
1366 } 1366 }
1367 } 1367 }
1368 1368
1369 /* create a fragment for each channel */ 1369 /* create a fragment for each channel */
1370 bits = B; 1370 bits = B;
1371 while (len > 0) { 1371 while (len > 0) {
1372 list = list->next; 1372 list = list->next;
1373 if (list == &ppp->channels) { 1373 if (list == &ppp->channels) {
1374 i = 0; 1374 i = 0;
1375 continue; 1375 continue;
1376 } 1376 }
1377 pch = list_entry(list, struct channel, clist); 1377 pch = list_entry(list, struct channel, clist);
1378 ++i; 1378 ++i;
1379 if (!pch->avail) 1379 if (!pch->avail)
1380 continue; 1380 continue;
1381 1381
1382 /* 1382 /*
1383 * Skip this channel if it has a fragment pending already and 1383 * Skip this channel if it has a fragment pending already and
1384 * we haven't given a fragment to all of the free channels. 1384 * we haven't given a fragment to all of the free channels.
1385 */ 1385 */
1386 if (pch->avail == 1) { 1386 if (pch->avail == 1) {
1387 if (nfree > 0) 1387 if (nfree > 0)
1388 continue; 1388 continue;
1389 } else { 1389 } else {
1390 pch->avail = 1; 1390 pch->avail = 1;
@@ -1393,32 +1393,32 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1393 /* check the channel's mtu and whether it is still attached. */ 1393 /* check the channel's mtu and whether it is still attached. */
1394 spin_lock_bh(&pch->downl); 1394 spin_lock_bh(&pch->downl);
1395 if (pch->chan == NULL) { 1395 if (pch->chan == NULL) {
1396 /* can't use this channel, it's being deregistered */ 1396 /* can't use this channel, it's being deregistered */
1397 if (pch->speed == 0) 1397 if (pch->speed == 0)
1398 nzero--; 1398 nzero--;
1399 else 1399 else
1400 totspeed -= pch->speed; 1400 totspeed -= pch->speed;
1401 1401
1402 spin_unlock_bh(&pch->downl); 1402 spin_unlock_bh(&pch->downl);
1403 pch->avail = 0; 1403 pch->avail = 0;
1404 totlen = len; 1404 totlen = len;
1405 totfree--; 1405 totfree--;
1406 nfree--; 1406 nfree--;
1407 if (--navail == 0) 1407 if (--navail == 0)
1408 break; 1408 break;
1409 continue; 1409 continue;
1410 } 1410 }
1411 1411
1412 /* 1412 /*
1413 *if the channel speed is not set divide 1413 *if the channel speed is not set divide
1414 *the packet evenly among the free channels; 1414 *the packet evenly among the free channels;
1415 *otherwise divide it according to the speed 1415 *otherwise divide it according to the speed
1416 *of the channel we are going to transmit on 1416 *of the channel we are going to transmit on
1417 */ 1417 */
1418 flen = len; 1418 flen = len;
1419 if (nfree > 0) { 1419 if (nfree > 0) {
1420 if (pch->speed == 0) { 1420 if (pch->speed == 0) {
1421 flen = totlen/nfree ; 1421 flen = totlen/nfree;
1422 if (nbigger > 0) { 1422 if (nbigger > 0) {
1423 flen++; 1423 flen++;
1424 nbigger--; 1424 nbigger--;
@@ -1436,8 +1436,8 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1436 } 1436 }
1437 1437
1438 /* 1438 /*
1439 *check if we are on the last channel or 1439 *check if we are on the last channel or
1440 *we exceded the lenght of the data to 1440 *we exceded the lenght of the data to
1441 *fragment 1441 *fragment
1442 */ 1442 */
1443 if ((nfree <= 0) || (flen > len)) 1443 if ((nfree <= 0) || (flen > len))
@@ -1448,29 +1448,29 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1448 *above formula will be equal or less than zero. 1448 *above formula will be equal or less than zero.
1449 *Skip the channel in this case 1449 *Skip the channel in this case
1450 */ 1450 */
1451 if (flen <= 0) { 1451 if (flen <= 0) {
1452 pch->avail = 2; 1452 pch->avail = 2;
1453 spin_unlock_bh(&pch->downl); 1453 spin_unlock_bh(&pch->downl);
1454 continue; 1454 continue;
1455 } 1455 }
1456 1456
1457 mtu = pch->chan->mtu - hdrlen; 1457 mtu = pch->chan->mtu - hdrlen;
1458 if (mtu < 4) 1458 if (mtu < 4)
1459 mtu = 4; 1459 mtu = 4;
1460 if (flen > mtu) 1460 if (flen > mtu)
1461 flen = mtu; 1461 flen = mtu;
1462 if (flen == len) 1462 if (flen == len)
1463 bits |= E; 1463 bits |= E;
1464 frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC); 1464 frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
1465 if (!frag) 1465 if (!frag)
1466 goto noskb; 1466 goto noskb;
1467 q = skb_put(frag, flen + hdrlen); 1467 q = skb_put(frag, flen + hdrlen);
1468 1468
1469 /* make the MP header */ 1469 /* make the MP header */
1470 q[0] = PPP_MP >> 8; 1470 q[0] = PPP_MP >> 8;
1471 q[1] = PPP_MP; 1471 q[1] = PPP_MP;
1472 if (ppp->flags & SC_MP_XSHORTSEQ) { 1472 if (ppp->flags & SC_MP_XSHORTSEQ) {
1473 q[2] = bits + ((ppp->nxseq >> 8) & 0xf); 1473 q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
1474 q[3] = ppp->nxseq; 1474 q[3] = ppp->nxseq;
1475 } else { 1475 } else {
1476 q[2] = bits; 1476 q[2] = bits;
@@ -1483,24 +1483,24 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1483 1483
1484 /* try to send it down the channel */ 1484 /* try to send it down the channel */
1485 chan = pch->chan; 1485 chan = pch->chan;
1486 if (!skb_queue_empty(&pch->file.xq) || 1486 if (!skb_queue_empty(&pch->file.xq) ||
1487 !chan->ops->start_xmit(chan, frag)) 1487 !chan->ops->start_xmit(chan, frag))
1488 skb_queue_tail(&pch->file.xq, frag); 1488 skb_queue_tail(&pch->file.xq, frag);
1489 pch->had_frag = 1; 1489 pch->had_frag = 1;
1490 p += flen; 1490 p += flen;
1491 len -= flen; 1491 len -= flen;
1492 ++ppp->nxseq; 1492 ++ppp->nxseq;
1493 bits = 0; 1493 bits = 0;
1494 spin_unlock_bh(&pch->downl); 1494 spin_unlock_bh(&pch->downl);
1495 } 1495 }
1496 ppp->nxchan = i; 1496 ppp->nxchan = i;
1497 1497
1498 return 1; 1498 return 1;
1499 1499
1500 noskb: 1500 noskb:
1501 spin_unlock_bh(&pch->downl); 1501 spin_unlock_bh(&pch->downl);
1502 if (ppp->debug & 1) 1502 if (ppp->debug & 1)
1503 printk(KERN_ERR "PPP: no memory (fragment)\n"); 1503 printk(KERN_ERR "PPP: no memory (fragment)\n");
1504 ++ppp->dev->stats.tx_errors; 1504 ++ppp->dev->stats.tx_errors;
1505 ++ppp->nxseq; 1505 ++ppp->nxseq;
1506 return 1; /* abandon the frame */ 1506 return 1; /* abandon the frame */
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 0c768593aad0..a849f6f23a17 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -568,7 +568,7 @@ void gelic_net_set_multi(struct net_device *netdev)
568 status); 568 status);
569 569
570 if ((netdev->flags & IFF_ALLMULTI) || 570 if ((netdev->flags & IFF_ALLMULTI) ||
571 (netdev->mc_count > GELIC_NET_MC_COUNT_MAX)) { 571 (netdev_mc_count(netdev) > GELIC_NET_MC_COUNT_MAX)) {
572 status = lv1_net_add_multicast_address(bus_id(card), 572 status = lv1_net_add_multicast_address(bus_id(card),
573 dev_id(card), 573 dev_id(card),
574 0, 1); 574 0, 1);
@@ -580,7 +580,7 @@ void gelic_net_set_multi(struct net_device *netdev)
580 } 580 }
581 581
582 /* set multicast addresses */ 582 /* set multicast addresses */
583 for (mc = netdev->mc_list; mc; mc = mc->next) { 583 netdev_for_each_mc_addr(mc, netdev) {
584 addr = 0; 584 addr = 0;
585 p = mc->dmi_addr; 585 p = mc->dmi_addr;
586 for (i = 0; i < ETH_ALEN; i++) { 586 for (i = 0; i < ETH_ALEN; i++) {
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index 227b141c4fbd..2663b2fdc0bb 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -1389,113 +1389,6 @@ static int gelic_wl_get_mode(struct net_device *netdev,
1389 return 0; 1389 return 0;
1390} 1390}
1391 1391
1392#ifdef CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE
1393/* SIOCIWFIRSTPRIV */
1394static int hex2bin(u8 *str, u8 *bin, unsigned int len)
1395{
1396 unsigned int i;
1397 static unsigned char *hex = "0123456789ABCDEF";
1398 unsigned char *p, *q;
1399 u8 tmp;
1400
1401 if (len != WPA_PSK_LEN * 2)
1402 return -EINVAL;
1403
1404 for (i = 0; i < WPA_PSK_LEN * 2; i += 2) {
1405 p = strchr(hex, toupper(str[i]));
1406 q = strchr(hex, toupper(str[i + 1]));
1407 if (!p || !q) {
1408 pr_info("%s: unconvertible PSK digit=%d\n",
1409 __func__, i);
1410 return -EINVAL;
1411 }
1412 tmp = ((p - hex) << 4) + (q - hex);
1413 *bin++ = tmp;
1414 }
1415 return 0;
1416};
1417
1418static int gelic_wl_priv_set_psk(struct net_device *net_dev,
1419 struct iw_request_info *info,
1420 union iwreq_data *data, char *extra)
1421{
1422 struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev));
1423 unsigned int len;
1424 unsigned long irqflag;
1425 int ret = 0;
1426
1427 pr_debug("%s:<- len=%d\n", __func__, data->data.length);
1428 len = data->data.length - 1;
1429 if (len <= 2)
1430 return -EINVAL;
1431
1432 spin_lock_irqsave(&wl->lock, irqflag);
1433 if (extra[0] == '"' && extra[len - 1] == '"') {
1434 pr_debug("%s: passphrase mode\n", __func__);
1435 /* pass phrase */
1436 if (GELIC_WL_EURUS_PSK_MAX_LEN < (len - 2)) {
1437 pr_info("%s: passphrase too long\n", __func__);
1438 ret = -E2BIG;
1439 goto out;
1440 }
1441 memset(wl->psk, 0, sizeof(wl->psk));
1442 wl->psk_len = len - 2;
1443 memcpy(wl->psk, &(extra[1]), wl->psk_len);
1444 wl->psk_type = GELIC_EURUS_WPA_PSK_PASSPHRASE;
1445 } else {
1446 ret = hex2bin(extra, wl->psk, len);
1447 if (ret)
1448 goto out;
1449 wl->psk_len = WPA_PSK_LEN;
1450 wl->psk_type = GELIC_EURUS_WPA_PSK_BIN;
1451 }
1452 set_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat);
1453out:
1454 spin_unlock_irqrestore(&wl->lock, irqflag);
1455 pr_debug("%s:->\n", __func__);
1456 return ret;
1457}
1458
1459static int gelic_wl_priv_get_psk(struct net_device *net_dev,
1460 struct iw_request_info *info,
1461 union iwreq_data *data, char *extra)
1462{
1463 struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev));
1464 char *p;
1465 unsigned long irqflag;
1466 unsigned int i;
1467
1468 pr_debug("%s:<-\n", __func__);
1469 if (!capable(CAP_NET_ADMIN))
1470 return -EPERM;
1471
1472 spin_lock_irqsave(&wl->lock, irqflag);
1473 p = extra;
1474 if (test_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat)) {
1475 if (wl->psk_type == GELIC_EURUS_WPA_PSK_BIN) {
1476 for (i = 0; i < wl->psk_len; i++) {
1477 sprintf(p, "%02xu", wl->psk[i]);
1478 p += 2;
1479 }
1480 *p = '\0';
1481 data->data.length = wl->psk_len * 2;
1482 } else {
1483 *p++ = '"';
1484 memcpy(p, wl->psk, wl->psk_len);
1485 p += wl->psk_len;
1486 *p++ = '"';
1487 *p = '\0';
1488 data->data.length = wl->psk_len + 2;
1489 }
1490 } else
1491 /* no psk set */
1492 data->data.length = 0;
1493 spin_unlock_irqrestore(&wl->lock, irqflag);
1494 pr_debug("%s:-> %d\n", __func__, data->data.length);
1495 return 0;
1496}
1497#endif
1498
1499/* SIOCGIWNICKN */ 1392/* SIOCGIWNICKN */
1500static int gelic_wl_get_nick(struct net_device *net_dev, 1393static int gelic_wl_get_nick(struct net_device *net_dev,
1501 struct iw_request_info *info, 1394 struct iw_request_info *info,
@@ -1571,8 +1464,10 @@ static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan,
1571 init_completion(&wl->scan_done); 1464 init_completion(&wl->scan_done);
1572 /* 1465 /*
1573 * If we have already a bss list, don't try to get new 1466 * If we have already a bss list, don't try to get new
1467 * unless we are doing an ESSID scan
1574 */ 1468 */
1575 if (!always_scan && wl->scan_stat == GELIC_WL_SCAN_STAT_GOT_LIST) { 1469 if ((!essid_len && !always_scan)
1470 && wl->scan_stat == GELIC_WL_SCAN_STAT_GOT_LIST) {
1576 pr_debug("%s: already has the list\n", __func__); 1471 pr_debug("%s: already has the list\n", __func__);
1577 complete(&wl->scan_done); 1472 complete(&wl->scan_done);
1578 goto out; 1473 goto out;
@@ -1673,7 +1568,7 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
1673 } 1568 }
1674 } 1569 }
1675 1570
1676 /* put them in the newtork_list */ 1571 /* put them in the network_list */
1677 for (i = 0, scan_info_size = 0, scan_info = buf; 1572 for (i = 0, scan_info_size = 0, scan_info = buf;
1678 scan_info_size < data_len; 1573 scan_info_size < data_len;
1679 i++, scan_info_size += be16_to_cpu(scan_info->size), 1574 i++, scan_info_size += be16_to_cpu(scan_info->size),
@@ -2009,7 +1904,7 @@ static int gelic_wl_do_wpa_setup(struct gelic_wl_info *wl)
2009 /* PSK type */ 1904 /* PSK type */
2010 wpa->psk_type = cpu_to_be16(wl->psk_type); 1905 wpa->psk_type = cpu_to_be16(wl->psk_type);
2011#ifdef DEBUG 1906#ifdef DEBUG
2012 pr_debug("%s: sec=%s psktype=%s\nn", __func__, 1907 pr_debug("%s: sec=%s psktype=%s\n", __func__,
2013 wpasecstr(wpa->security), 1908 wpasecstr(wpa->security),
2014 (wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ? 1909 (wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ?
2015 "BIN" : "passphrase"); 1910 "BIN" : "passphrase");
@@ -2019,9 +1914,9 @@ static int gelic_wl_do_wpa_setup(struct gelic_wl_info *wl)
2019 * the debug log because this dumps your precious 1914 * the debug log because this dumps your precious
2020 * passphrase/key. 1915 * passphrase/key.
2021 */ 1916 */
2022 pr_debug("%s: psk=%s\n", 1917 pr_debug("%s: psk=%s\n", __func__,
2023 (wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ? 1918 (wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ?
2024 (char *)"N/A" : (char *)wpa->psk); 1919 "N/A" : wpa->psk);
2025#endif 1920#endif
2026#endif 1921#endif
2027 /* issue wpa setup */ 1922 /* issue wpa setup */
@@ -2406,40 +2301,10 @@ static const iw_handler gelic_wl_wext_handler[] =
2406 IW_IOCTL(SIOCGIWNICKN) = gelic_wl_get_nick, 2301 IW_IOCTL(SIOCGIWNICKN) = gelic_wl_get_nick,
2407}; 2302};
2408 2303
2409#ifdef CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE
2410static struct iw_priv_args gelic_wl_private_args[] =
2411{
2412 {
2413 .cmd = GELIC_WL_PRIV_SET_PSK,
2414 .set_args = IW_PRIV_TYPE_CHAR |
2415 (GELIC_WL_EURUS_PSK_MAX_LEN + 2),
2416 .name = "set_psk"
2417 },
2418 {
2419 .cmd = GELIC_WL_PRIV_GET_PSK,
2420 .get_args = IW_PRIV_TYPE_CHAR |
2421 (GELIC_WL_EURUS_PSK_MAX_LEN + 2),
2422 .name = "get_psk"
2423 }
2424};
2425
2426static const iw_handler gelic_wl_private_handler[] =
2427{
2428 gelic_wl_priv_set_psk,
2429 gelic_wl_priv_get_psk,
2430};
2431#endif
2432
2433static const struct iw_handler_def gelic_wl_wext_handler_def = { 2304static const struct iw_handler_def gelic_wl_wext_handler_def = {
2434 .num_standard = ARRAY_SIZE(gelic_wl_wext_handler), 2305 .num_standard = ARRAY_SIZE(gelic_wl_wext_handler),
2435 .standard = gelic_wl_wext_handler, 2306 .standard = gelic_wl_wext_handler,
2436 .get_wireless_stats = gelic_wl_get_wireless_stats, 2307 .get_wireless_stats = gelic_wl_get_wireless_stats,
2437#ifdef CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE
2438 .num_private = ARRAY_SIZE(gelic_wl_private_handler),
2439 .num_private_args = ARRAY_SIZE(gelic_wl_private_args),
2440 .private = gelic_wl_private_handler,
2441 .private_args = gelic_wl_private_args,
2442#endif
2443}; 2308};
2444 2309
2445static struct net_device * __devinit gelic_wl_alloc(struct gelic_card *card) 2310static struct net_device * __devinit gelic_wl_alloc(struct gelic_card *card)
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index dd35066a7f8d..4ef0afbcbe1b 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -61,7 +61,7 @@ static int msi;
61module_param(msi, int, 0); 61module_param(msi, int, 0);
62MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts."); 62MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
63 63
64static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = { 64static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = {
65 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)}, 65 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
66 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)}, 66 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
67 /* required last entry */ 67 /* required last entry */
@@ -4087,7 +4087,6 @@ static void __devexit ql3xxx_remove(struct pci_dev *pdev)
4087 struct ql3_adapter *qdev = netdev_priv(ndev); 4087 struct ql3_adapter *qdev = netdev_priv(ndev);
4088 4088
4089 unregister_netdev(ndev); 4089 unregister_netdev(ndev);
4090 qdev = netdev_priv(ndev);
4091 4090
4092 ql_disable_interrupts(qdev); 4091 ql_disable_interrupts(qdev);
4093 4092
diff --git a/drivers/net/qlcnic/Makefile b/drivers/net/qlcnic/Makefile
new file mode 100644
index 000000000000..ddba83ef3f44
--- /dev/null
+++ b/drivers/net/qlcnic/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for Qlogic 1G/10G Ethernet Driver for CNA devices
3#
4
5obj-$(CONFIG_QLCNIC) := qlcnic.o
6
7qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
8 qlcnic_ethtool.o qlcnic_ctx.o
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
new file mode 100644
index 000000000000..b40a851ec7d1
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -0,0 +1,1126 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#ifndef _QLCNIC_H_
26#define _QLCNIC_H_
27
28#include <linux/module.h>
29#include <linux/kernel.h>
30#include <linux/types.h>
31#include <linux/ioport.h>
32#include <linux/pci.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/ip.h>
36#include <linux/in.h>
37#include <linux/tcp.h>
38#include <linux/skbuff.h>
39#include <linux/firmware.h>
40
41#include <linux/ethtool.h>
42#include <linux/mii.h>
43#include <linux/timer.h>
44
45#include <linux/vmalloc.h>
46
47#include <linux/io.h>
48#include <asm/byteorder.h>
49
50#include "qlcnic_hdr.h"
51
52#define _QLCNIC_LINUX_MAJOR 5
53#define _QLCNIC_LINUX_MINOR 0
54#define _QLCNIC_LINUX_SUBVERSION 0
55#define QLCNIC_LINUX_VERSIONID "5.0.0"
56
57#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
58#define _major(v) (((v) >> 24) & 0xff)
59#define _minor(v) (((v) >> 16) & 0xff)
60#define _build(v) ((v) & 0xffff)
61
62/* version in image has weird encoding:
63 * 7:0 - major
64 * 15:8 - minor
65 * 31:16 - build (little endian)
66 */
67#define QLCNIC_DECODE_VERSION(v) \
68 QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
69
70#define QLCNIC_NUM_FLASH_SECTORS (64)
71#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024)
72#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \
73 * QLCNIC_FLASH_SECTOR_SIZE)
74
75#define RCV_DESC_RINGSIZE(rds_ring) \
76 (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
77#define RCV_BUFF_RINGSIZE(rds_ring) \
78 (sizeof(struct qlcnic_rx_buffer) * rds_ring->num_desc)
79#define STATUS_DESC_RINGSIZE(sds_ring) \
80 (sizeof(struct status_desc) * (sds_ring)->num_desc)
81#define TX_BUFF_RINGSIZE(tx_ring) \
82 (sizeof(struct qlcnic_cmd_buffer) * tx_ring->num_desc)
83#define TX_DESC_RINGSIZE(tx_ring) \
84 (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
85
86#define QLCNIC_P3P_A0 0x50
87
88#define QLCNIC_IS_REVISION_P3P(REVISION) (REVISION >= QLCNIC_P3P_A0)
89
90#define FIRST_PAGE_GROUP_START 0
91#define FIRST_PAGE_GROUP_END 0x100000
92
93#define P3_MAX_MTU (9600)
94#define QLCNIC_MAX_ETHERHDR 32 /* This contains some padding */
95
96#define QLCNIC_P3_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN)
97#define QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3_MAX_MTU)
98#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048
99#define QLCNIC_LRO_BUFFER_EXTRA 2048
100
101#define QLCNIC_RX_LRO_BUFFER_LENGTH (8060)
102
103/* Opcodes to be used with the commands */
104#define TX_ETHER_PKT 0x01
105#define TX_TCP_PKT 0x02
106#define TX_UDP_PKT 0x03
107#define TX_IP_PKT 0x04
108#define TX_TCP_LSO 0x05
109#define TX_TCP_LSO6 0x06
110#define TX_IPSEC 0x07
111#define TX_IPSEC_CMD 0x0a
112#define TX_TCPV6_PKT 0x0b
113#define TX_UDPV6_PKT 0x0c
114
115/* Tx defines */
116#define MAX_BUFFERS_PER_CMD 32
117#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + 4)
118#define QLCNIC_MAX_TX_TIMEOUTS 2
119
120/*
121 * Following are the states of the Phantom. Phantom will set them and
122 * Host will read to check if the fields are correct.
123 */
124#define PHAN_INITIALIZE_FAILED 0xffff
125#define PHAN_INITIALIZE_COMPLETE 0xff01
126
127/* Host writes the following to notify that it has done the init-handshake */
128#define PHAN_INITIALIZE_ACK 0xf00f
129#define PHAN_PEG_RCV_INITIALIZED 0xff01
130
131#define NUM_RCV_DESC_RINGS 3
132#define NUM_STS_DESC_RINGS 4
133
134#define RCV_RING_NORMAL 0
135#define RCV_RING_JUMBO 1
136#define RCV_RING_LRO 2
137
138#define MIN_CMD_DESCRIPTORS 64
139#define MIN_RCV_DESCRIPTORS 64
140#define MIN_JUMBO_DESCRIPTORS 32
141
142#define MAX_CMD_DESCRIPTORS 1024
143#define MAX_RCV_DESCRIPTORS_1G 4096
144#define MAX_RCV_DESCRIPTORS_10G 8192
145#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
146#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
147#define MAX_LRO_RCV_DESCRIPTORS 8
148
149#define DEFAULT_RCV_DESCRIPTORS_1G 2048
150#define DEFAULT_RCV_DESCRIPTORS_10G 4096
151
152#define get_next_index(index, length) \
153 (((index) + 1) & ((length) - 1))
154
155#define MPORT_MULTI_FUNCTION_MODE 0x2222
156
157/*
158 * Following data structures describe the descriptors that will be used.
159 * Added fileds of tcpHdrSize and ipHdrSize, The driver needs to do it only when
160 * we are doing LSO (above the 1500 size packet) only.
161 */
162
163#define FLAGS_VLAN_TAGGED 0x10
164#define FLAGS_VLAN_OOB 0x40
165
166#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
167 (cmd_desc)->vlan_TCI = cpu_to_le16(v);
168#define qlcnic_set_cmd_desc_port(cmd_desc, var) \
169 ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
170#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \
171 ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
172
173#define qlcnic_set_tx_port(_desc, _port) \
174 ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
175
176#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
177 ((_desc)->flags_opcode = \
178 cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
179
180#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
181 ((_desc)->nfrags__length = \
182 cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
183
184struct cmd_desc_type0 {
185 u8 tcp_hdr_offset; /* For LSO only */
186 u8 ip_hdr_offset; /* For LSO only */
187 __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */
188 __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */
189
190 __le64 addr_buffer2;
191
192 __le16 reference_handle;
193 __le16 mss;
194 u8 port_ctxid; /* 7:4 ctxid 3:0 port */
195 u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
196 __le16 conn_id; /* IPSec offoad only */
197
198 __le64 addr_buffer3;
199 __le64 addr_buffer1;
200
201 __le16 buffer_length[4];
202
203 __le64 addr_buffer4;
204
205 __le32 reserved2;
206 __le16 reserved;
207 __le16 vlan_TCI;
208
209} __attribute__ ((aligned(64)));
210
211/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */
212struct rcv_desc {
213 __le16 reference_handle;
214 __le16 reserved;
215 __le32 buffer_length; /* allocated buffer length (usually 2K) */
216 __le64 addr_buffer;
217};
218
219/* opcode field in status_desc */
220#define QLCNIC_SYN_OFFLOAD 0x03
221#define QLCNIC_RXPKT_DESC 0x04
222#define QLCNIC_OLD_RXPKT_DESC 0x3f
223#define QLCNIC_RESPONSE_DESC 0x05
224#define QLCNIC_LRO_DESC 0x12
225
226/* for status field in status_desc */
227#define STATUS_CKSUM_OK (2)
228
229/* owner bits of status_desc */
230#define STATUS_OWNER_HOST (0x1ULL << 56)
231#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
232
233/* Status descriptor:
234 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
235 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
236 53-55 desc_cnt, 56-57 owner, 58-63 opcode
237 */
238#define qlcnic_get_sts_port(sts_data) \
239 ((sts_data) & 0x0F)
240#define qlcnic_get_sts_status(sts_data) \
241 (((sts_data) >> 4) & 0x0F)
242#define qlcnic_get_sts_type(sts_data) \
243 (((sts_data) >> 8) & 0x0F)
244#define qlcnic_get_sts_totallength(sts_data) \
245 (((sts_data) >> 12) & 0xFFFF)
246#define qlcnic_get_sts_refhandle(sts_data) \
247 (((sts_data) >> 28) & 0xFFFF)
248#define qlcnic_get_sts_prot(sts_data) \
249 (((sts_data) >> 44) & 0x0F)
250#define qlcnic_get_sts_pkt_offset(sts_data) \
251 (((sts_data) >> 48) & 0x1F)
252#define qlcnic_get_sts_desc_cnt(sts_data) \
253 (((sts_data) >> 53) & 0x7)
254#define qlcnic_get_sts_opcode(sts_data) \
255 (((sts_data) >> 58) & 0x03F)
256
257#define qlcnic_get_lro_sts_refhandle(sts_data) \
258 ((sts_data) & 0x0FFFF)
259#define qlcnic_get_lro_sts_length(sts_data) \
260 (((sts_data) >> 16) & 0x0FFFF)
261#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \
262 (((sts_data) >> 32) & 0x0FF)
263#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \
264 (((sts_data) >> 40) & 0x0FF)
265#define qlcnic_get_lro_sts_timestamp(sts_data) \
266 (((sts_data) >> 48) & 0x1)
267#define qlcnic_get_lro_sts_type(sts_data) \
268 (((sts_data) >> 49) & 0x7)
269#define qlcnic_get_lro_sts_push_flag(sts_data) \
270 (((sts_data) >> 52) & 0x1)
271#define qlcnic_get_lro_sts_seq_number(sts_data) \
272 ((sts_data) & 0x0FFFFFFFF)
273
274
/*
 * Status (completion) descriptor written by the device; bit layout of
 * word 0 is decoded by the qlcnic_get_sts_* / qlcnic_get_lro_sts_* macros.
 */
struct status_desc {
	__le64 status_desc_data[2];
} __attribute__ ((aligned(16)));
278
/* UNIFIED ROMIMAGE */
#define QLCNIC_UNI_FW_MIN_SIZE		0xc8000
#define QLCNIC_UNI_DIR_SECT_PRODUCT_TBL	0x0
#define QLCNIC_UNI_DIR_SECT_BOOTLD	0x6
#define QLCNIC_UNI_DIR_SECT_FW		0x7

/*Offsets */
#define QLCNIC_UNI_CHIP_REV_OFF		10
#define QLCNIC_UNI_FLAGS_OFF		11
#define QLCNIC_UNI_BIOS_VERSION_OFF	12
#define QLCNIC_UNI_BOOTLD_IDX_OFF	27
#define QLCNIC_UNI_FIRMWARE_IDX_OFF	29

/* Section directory header inside a unified ROM image (phanfw.bin);
 * presumably findex is the section's byte offset in the image — verify. */
struct uni_table_desc{
	u32	findex;
	u32	num_entries;
	u32	entry_size;
	u32	reserved[5];
};

/* Data section header inside a unified ROM image. */
struct uni_data_desc{
	u32	findex;
	u32	size;
	u32	reserved[5];
};
304
305/* Magic number to let user know flash is programmed */
306#define QLCNIC_BDINFO_MAGIC 0x12345678
307
308#define QLCNIC_BRDTYPE_P3_REF_QG 0x0021
309#define QLCNIC_BRDTYPE_P3_HMEZ 0x0022
310#define QLCNIC_BRDTYPE_P3_10G_CX4_LP 0x0023
311#define QLCNIC_BRDTYPE_P3_4_GB 0x0024
312#define QLCNIC_BRDTYPE_P3_IMEZ 0x0025
313#define QLCNIC_BRDTYPE_P3_10G_SFP_PLUS 0x0026
314#define QLCNIC_BRDTYPE_P3_10000_BASE_T 0x0027
315#define QLCNIC_BRDTYPE_P3_XG_LOM 0x0028
316#define QLCNIC_BRDTYPE_P3_4_GB_MM 0x0029
317#define QLCNIC_BRDTYPE_P3_10G_SFP_CT 0x002a
318#define QLCNIC_BRDTYPE_P3_10G_SFP_QT 0x002b
319#define QLCNIC_BRDTYPE_P3_10G_CX4 0x0031
320#define QLCNIC_BRDTYPE_P3_10G_XFP 0x0032
321#define QLCNIC_BRDTYPE_P3_10G_TP 0x0080
322
323/* Flash memory map */
324#define QLCNIC_BRDCFG_START 0x4000 /* board config */
325#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
326#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
#define QLCNIC_USER_START		0x3E8000	/* Firmware info */
328
329#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
330#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
331#define QLCNIC_FW_SERIAL_NUM_OFFSET (QLCNIC_USER_START+0x81c)
332#define QLCNIC_BIOS_VERSION_OFFSET (QLCNIC_USER_START+0x83c)
333
334#define QLCNIC_BRDTYPE_OFFSET (QLCNIC_BRDCFG_START+0x8)
335#define QLCNIC_FW_MAGIC_OFFSET (QLCNIC_BRDCFG_START+0x128)
336
337#define QLCNIC_FW_MIN_SIZE (0x3fffff)
338#define QLCNIC_UNIFIED_ROMIMAGE 0
339#define QLCNIC_FLASH_ROMIMAGE 1
340#define QLCNIC_UNKNOWN_ROMIMAGE 0xff
341
342#define QLCNIC_UNIFIED_ROMIMAGE_NAME "phanfw.bin"
343#define QLCNIC_FLASH_ROMIMAGE_NAME "flash"
344
345extern char qlcnic_driver_name[];
346
347/* Number of status descriptors to handle per interrupt */
348#define MAX_STATUS_HANDLE (64)
349
/*
 * qlcnic_skb_frag{} is to contain mapping info for each SG list. This
 * has to be freed when DMA is complete. This is part of qlcnic_cmd_buffer{}
 * (the struct named "qlcnic_tx_buffer" does not exist in this driver).
 */
struct qlcnic_skb_frag {
	u64 dma;	/* mapped bus address of the fragment */
	u64 length;	/* mapped length, for unmapping */
};

/* CRB register offsets for each rx/status ring of a receive context. */
struct qlcnic_recv_crb {
	u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
	u32 crb_sts_consumer[NUM_STS_DESC_RINGS];
	u32 sw_int_mask[NUM_STS_DESC_RINGS];
};
364
/* Following defines are for the state of the buffers */
#define	QLCNIC_BUFFER_FREE	0
#define	QLCNIC_BUFFER_BUSY	1

/*
 * There will be one qlcnic_cmd_buffer per skb packet. These will be
 * used to save the dma info for pci_unmap_page()
 */
struct qlcnic_cmd_buffer {
	struct sk_buff *skb;	/* skb being transmitted */
	struct qlcnic_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
	u32 frag_count;		/* number of valid entries in frag_array */
};

/* In rx_buffer, we do not need multiple fragments as it is a single buffer */
struct qlcnic_rx_buffer {
	struct list_head list;	/* presumably linkage on the rds ring free_list — verify */
	struct sk_buff *skb;
	u64 dma;		/* mapped bus address of the skb data */
	u16 ref_handle;		/* handle posted to hw in rcv_desc.reference_handle */
	u16 state;		/* QLCNIC_BUFFER_FREE or QLCNIC_BUFFER_BUSY */
};
387
/* Board types */
#define	QLCNIC_GBE	0x01
#define	QLCNIC_XGBE	0x02

/*
 * One hardware_context{} per adapter
 * contains interrupt info as well shared hardware info.
 */
struct qlcnic_hardware_context {
	void __iomem *pci_base0;	/* mapped PCI region (presumably BAR 0) */
	void __iomem *ocm_win_crb;

	unsigned long pci_len0;		/* length of the pci_base0 mapping */

	u32 ocm_win;
	u32 crb_win;

	rwlock_t crb_lock;		/* NOTE(review): presumably serializes crb_win usage — verify */
	struct mutex mem_lock;

	u8 cut_through;
	u8 revision_id;
	u8 pci_func;
	u8 linkup;
	u16 port_type;			/* presumably QLCNIC_GBE / QLCNIC_XGBE — verify */
	u16 board_type;
};

/* Software-maintained driver counters (exported via ethtool, presumably). */
struct qlcnic_adapter_stats {
	u64  xmitcalled;
	u64  xmitfinished;
	u64  rxdropped;
	u64  txdropped;
	u64  csummed;
	u64  rx_pkts;
	u64  lro_pkts;
	u64  rxbytes;
	u64  txbytes;
};
427
/*
 * Rcv Descriptor Context. One such per Rcv Descriptor. There may
 * be one Rcv Descriptor for normal packets, one for jumbo and may be others.
 */
struct qlcnic_host_rds_ring {
	u32 producer;			/* host (sw) producer index */
	u32 num_desc;			/* descriptors in the ring */
	u32 dma_size;
	u32 skb_size;
	u32 flags;
	void __iomem *crb_rcv_producer;	/* CRB where producer is published to hw */
	struct rcv_desc *desc_head;	/* ring memory; phys_addr below is its bus address */
	struct qlcnic_rx_buffer *rx_buf_arr;	/* one entry per descriptor */
	struct list_head free_list;	/* free qlcnic_rx_buffer entries */
	spinlock_t lock;
	dma_addr_t phys_addr;
};

/* Status (completion) ring; one NAPI context per ring. */
struct qlcnic_host_sds_ring {
	u32 consumer;			/* host (sw) consumer index */
	u32 num_desc;
	void __iomem *crb_sts_consumer;
	void __iomem *crb_intr_mask;

	struct status_desc *desc_head;	/* ring memory; phys_addr below is its bus address */
	struct qlcnic_adapter *adapter;	/* back-pointer to owning adapter */
	struct napi_struct napi;
	struct list_head free_list[NUM_RCV_DESC_RINGS];

	int irq;

	dma_addr_t phys_addr;
	char name[IFNAMSIZ+4];		/* presumably the irq name — verify */
};

/* Transmit (command descriptor) ring. */
struct qlcnic_host_tx_ring {
	u32 producer;			/* host (sw) producer index */
	__le32 *hw_consumer;		/* consumer index; hw_cons_phys_addr is its bus address */
	u32 sw_consumer;
	void __iomem *crb_cmd_producer;
	u32 num_desc;

	struct netdev_queue *txq;

	struct qlcnic_cmd_buffer *cmd_buf_arr;	/* per-descriptor unmap info */
	struct cmd_desc_type0 *desc_head;
	dma_addr_t phys_addr;
	dma_addr_t hw_cons_phys_addr;
};

/*
 * Receive context. There is one such structure per instance of the
 * receive processing. Any state information that is relevant to
 * the receive, and must be in this structure, lives here. The global data
 * may be present elsewhere.
 */
struct qlcnic_recv_context {
	u32 state;
	u16 context_id;			/* presumably the handle returned by firmware — verify */
	u16 virt_port;

	struct qlcnic_host_rds_ring *rds_rings;
	struct qlcnic_host_sds_ring *sds_rings;
};
492
493/* HW context creation */
494
495#define QLCNIC_OS_CRB_RETRY_COUNT 4000
496#define QLCNIC_CDRP_SIGNATURE_MAKE(pcifn, version) \
497 (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
498
499#define QLCNIC_CDRP_CMD_BIT 0x80000000
500
501/*
502 * All responses must have the QLCNIC_CDRP_CMD_BIT cleared
503 * in the crb QLCNIC_CDRP_CRB_OFFSET.
504 */
505#define QLCNIC_CDRP_FORM_RSP(rsp) (rsp)
506#define QLCNIC_CDRP_IS_RSP(rsp) (((rsp) & QLCNIC_CDRP_CMD_BIT) == 0)
507
508#define QLCNIC_CDRP_RSP_OK 0x00000001
509#define QLCNIC_CDRP_RSP_FAIL 0x00000002
510#define QLCNIC_CDRP_RSP_TIMEOUT 0x00000003
511
512/*
513 * All commands must have the QLCNIC_CDRP_CMD_BIT set in
514 * the crb QLCNIC_CDRP_CRB_OFFSET.
515 */
516#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd))
517#define QLCNIC_CDRP_IS_CMD(cmd) (((cmd) & QLCNIC_CDRP_CMD_BIT) != 0)
518
519#define QLCNIC_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001
520#define QLCNIC_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002
521#define QLCNIC_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003
522#define QLCNIC_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004
523#define QLCNIC_CDRP_CMD_READ_MAX_RX_CTX 0x00000005
524#define QLCNIC_CDRP_CMD_READ_MAX_TX_CTX 0x00000006
525#define QLCNIC_CDRP_CMD_CREATE_RX_CTX 0x00000007
526#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008
527#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009
528#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
529#define QLCNIC_CDRP_CMD_SETUP_STATISTICS 0x0000000e
530#define QLCNIC_CDRP_CMD_GET_STATISTICS 0x0000000f
531#define QLCNIC_CDRP_CMD_DELETE_STATISTICS 0x00000010
532#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012
533#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013
534#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014
535#define QLCNIC_CDRP_CMD_READ_HW_REG 0x00000015
536#define QLCNIC_CDRP_CMD_GET_FLOW_CTL 0x00000016
537#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017
538#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018
539#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019
540#define QLCNIC_CDRP_CMD_CONFIGURE_TOE 0x0000001a
541#define QLCNIC_CDRP_CMD_FUNC_ATTRIB 0x0000001b
542#define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c
543#define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d
544#define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e
545#define QLCNIC_CDRP_CMD_MAX 0x0000001f
546
547#define QLCNIC_RCODE_SUCCESS 0
548#define QLCNIC_RCODE_TIMEOUT 17
549#define QLCNIC_DESTROY_CTX_RESET 0
550
551/*
552 * Capabilities Announced
553 */
554#define QLCNIC_CAP0_LEGACY_CONTEXT (1)
555#define QLCNIC_CAP0_LEGACY_MN (1 << 2)
556#define QLCNIC_CAP0_LSO (1 << 6)
557#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
558#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
559
560/*
561 * Context state
562 */
563#define QLCHAL_VERSION 1
564
565#define QLCNIC_HOST_CTX_STATE_ACTIVE 2
566
567/*
568 * Rx context
569 */
570
/* Per-SDS-ring parameters in the CREATE_RX_CTX request. */
struct qlcnic_hostrq_sds_ring {
	__le64 host_phys_addr;	/* Ring base addr */
	__le32 ring_size;		/* Ring entries */
	__le16 msi_index;
	__le16 rsvd;		/* Padding */
};

/* Per-RDS-ring parameters in the CREATE_RX_CTX request. */
struct qlcnic_hostrq_rds_ring {
	__le64 host_phys_addr;	/* Ring base addr */
	__le64 buff_size;		/* Packet buffer size */
	__le32 ring_size;		/* Ring entries */
	__le32 ring_kind;		/* Class of ring */
};
584
585struct qlcnic_hostrq_rx_ctx {
586 __le64 host_rsp_dma_addr; /* Response dma'd here */
587 __le32 capabilities[4]; /* Flag bit vector */
588 __le32 host_int_crb_mode; /* Interrupt crb usage */
589 __le32 host_rds_crb_mode; /* RDS crb usage */
590 /* These ring offsets are relative to data[0] below */
591 __le32 rds_ring_offset; /* Offset to RDS config */
592 __le32 sds_ring_offset; /* Offset to SDS config */
593 __le16 num_rds_rings; /* Count of RDS rings */
594 __le16 num_sds_rings; /* Count of SDS rings */
595 __le16 rsvd1; /* Padding */
596 __le16 rsvd2; /* Padding */
597 u8 reserved[128]; /* reserve space for future expansion*/
598 /* MUST BE 64-bit aligned.
599 The following is packed:
600 - N hostrq_rds_rings
601 - N hostrq_sds_rings */
602 char data[0];
603};
604
/* Per-RDS-ring CRB assignment returned by firmware. */
struct qlcnic_cardrsp_rds_ring{
	__le32 host_producer_crb;	/* Crb to use */
	__le32 rsvd1;		/* Padding */
};

/* Per-SDS-ring CRB assignments returned by firmware. */
struct qlcnic_cardrsp_sds_ring {
	__le32 host_consumer_crb;	/* Crb to use */
	__le32 interrupt_crb;	/* Crb to use */
};
614
615struct qlcnic_cardrsp_rx_ctx {
616 /* These ring offsets are relative to data[0] below */
617 __le32 rds_ring_offset; /* Offset to RDS config */
618 __le32 sds_ring_offset; /* Offset to SDS config */
619 __le32 host_ctx_state; /* Starting State */
620 __le32 num_fn_per_port; /* How many PCI fn share the port */
621 __le16 num_rds_rings; /* Count of RDS rings */
622 __le16 num_sds_rings; /* Count of SDS rings */
623 __le16 context_id; /* Handle for context */
624 u8 phys_port; /* Physical id of port */
625 u8 virt_port; /* Virtual/Logical id of port */
626 u8 reserved[128]; /* save space for future expansion */
627 /* MUST BE 64-bit aligned.
628 The following is packed:
629 - N cardrsp_rds_rings
630 - N cardrs_sds_rings */
631 char data[0];
632};
633
634#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
635 (sizeof(HOSTRQ_RX) + \
636 (rds_rings)*(sizeof(struct qlcnic_hostrq_rds_ring)) + \
637 (sds_rings)*(sizeof(struct qlcnic_hostrq_sds_ring)))
638
639#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \
640 (sizeof(CARDRSP_RX) + \
641 (rds_rings)*(sizeof(struct qlcnic_cardrsp_rds_ring)) + \
642 (sds_rings)*(sizeof(struct qlcnic_cardrsp_sds_ring)))
643
644/*
645 * Tx context
646 */
647
/* Command (tx) ring parameters in the CREATE_TX_CTX request. */
struct qlcnic_hostrq_cds_ring {
	__le64 host_phys_addr;	/* Ring base addr */
	__le32 ring_size;		/* Ring entries */
	__le32 rsvd;		/* Padding */
};

/* CREATE_TX_CTX request body sent to firmware. */
struct qlcnic_hostrq_tx_ctx {
	__le64 host_rsp_dma_addr;	/* Response dma'd here */
	__le64 cmd_cons_dma_addr;	/* dma address for the hw consumer writeback — TODO confirm */
	__le64 dummy_dma_addr;	/* */
	__le32 capabilities[4];	/* Flag bit vector */
	__le32 host_int_crb_mode;	/* Interrupt crb usage */
	__le32 rsvd1;		/* Padding */
	__le16 rsvd2;		/* Padding */
	__le16 interrupt_ctl;
	__le16 msi_index;
	__le16 rsvd3;		/* Padding */
	struct qlcnic_hostrq_cds_ring cds_ring;	/* Desc of cds ring */
	u8  reserved[128];	/* future expansion */
};
668
/* Command (tx) ring CRB assignments returned by firmware. */
struct qlcnic_cardrsp_cds_ring {
	__le32 host_producer_crb;	/* Crb to use */
	__le32 interrupt_crb;	/* Crb to use */
};

/* CREATE_TX_CTX response from firmware. */
struct qlcnic_cardrsp_tx_ctx {
	__le32 host_ctx_state;	/* Starting state */
	__le16 context_id;		/* Handle for context */
	u8  phys_port;		/* Physical id of port */
	u8  virt_port;		/* Virtual/Logical id of port */
	struct qlcnic_cardrsp_cds_ring cds_ring;	/* Card cds settings */
	u8  reserved[128];	/* future expansion */
};
682
683#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
684#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
685
686/* CRB */
687
688#define QLCNIC_HOST_RDS_CRB_MODE_UNIQUE 0
689#define QLCNIC_HOST_RDS_CRB_MODE_SHARED 1
690#define QLCNIC_HOST_RDS_CRB_MODE_CUSTOM 2
691#define QLCNIC_HOST_RDS_CRB_MODE_MAX 3
692
693#define QLCNIC_HOST_INT_CRB_MODE_UNIQUE 0
694#define QLCNIC_HOST_INT_CRB_MODE_SHARED 1
695#define QLCNIC_HOST_INT_CRB_MODE_NORX 2
696#define QLCNIC_HOST_INT_CRB_MODE_NOTX 3
697#define QLCNIC_HOST_INT_CRB_MODE_NORXTX 4
698
699
/* MAC */

#define MC_COUNT_P3	38

/* MAC filter operations (presumably used in firmware requests — verify) */
#define QLCNIC_MAC_NOOP	0
#define QLCNIC_MAC_ADD	1
#define QLCNIC_MAC_DEL	2

/* Node on qlcnic_adapter.mac_list; freed by qlcnic_free_mac_list(). */
struct qlcnic_mac_list_s {
	struct list_head list;
	uint8_t mac_addr[ETH_ALEN+2];	/* NOTE(review): +2 beyond ETH_ALEN — padding? confirm */
};

/*
 * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is
 * adjusted based on configured MTU.
 */
#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US	3
#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS	256
#define QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS	64
#define QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US	4

#define QLCNIC_INTR_DEFAULT		0x04

/* Packs the four coalescing knobs into a single 64-bit word. */
union qlcnic_nic_intr_coalesce_data {
	struct {
		u16	rx_packets;
		u16	rx_time_us;
		u16	tx_packets;
		u16	tx_time_us;
	} data;
	u64		word;
};

/* Coalescing configuration handed to qlcnic_config_intr_coalesce(). */
struct qlcnic_nic_intr_coalesce {
	u16	stats_time_us;
	u16	rate_sample_time;
	u16	flags;
	u16	rsvd_1;
	u32	low_threshold;
	u32	high_threshold;
	union qlcnic_nic_intr_coalesce_data normal;
	union qlcnic_nic_intr_coalesce_data low;
	union qlcnic_nic_intr_coalesce_data high;
	union qlcnic_nic_intr_coalesce_data irq;
};
746
747#define QLCNIC_HOST_REQUEST 0x13
748#define QLCNIC_REQUEST 0x14
749
750#define QLCNIC_MAC_EVENT 0x1
751
752#define QLCNIC_IP_UP 2
753#define QLCNIC_IP_DOWN 3
754
755/*
756 * Driver --> Firmware
757 */
758#define QLCNIC_H2C_OPCODE_START 0
759#define QLCNIC_H2C_OPCODE_CONFIG_RSS 1
760#define QLCNIC_H2C_OPCODE_CONFIG_RSS_TBL 2
761#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3
762#define QLCNIC_H2C_OPCODE_CONFIG_LED 4
763#define QLCNIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5
764#define QLCNIC_H2C_OPCODE_CONFIG_L2_MAC 6
765#define QLCNIC_H2C_OPCODE_LRO_REQUEST 7
766#define QLCNIC_H2C_OPCODE_GET_SNMP_STATS 8
767#define QLCNIC_H2C_OPCODE_PROXY_START_REQUEST 9
768#define QLCNIC_H2C_OPCODE_PROXY_STOP_REQUEST 10
769#define QLCNIC_H2C_OPCODE_PROXY_SET_MTU 11
770#define QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12
771#define QLCNIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13
772#define QLCNIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14
773#define QLCNIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15
774#define QLCNIC_H2C_OPCODE_GET_NET_STATS 16
775#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
776#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 18
777#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 19
778#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE 20
779#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 21
780#define QLCNIC_C2C_OPCODE 22
781#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 23
782#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 24
783#define QLCNIC_H2C_OPCODE_LAST 25
784/*
785 * Firmware --> Driver
786 */
787
788#define QLCNIC_C2H_OPCODE_START 128
789#define QLCNIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129
790#define QLCNIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130
791#define QLCNIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131
792#define QLCNIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132
793#define QLCNIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133
794#define QLCNIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134
795#define QLCNIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135
796#define QLCNIC_C2H_OPCODE_GET_SNMP_STATS 136
797#define QLCNIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137
798#define QLCNIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138
799#define QLCNIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139
800#define QLCNIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140
801#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
802#define QLCNIC_C2H_OPCODE_LAST 142
803
804#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
805#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
806#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
807
808#define QLCNIC_LRO_REQUEST_CLEANUP 4
809
/* Capabilities received */
811#define QLCNIC_FW_CAPABILITY_BDG (1 << 8)
812#define QLCNIC_FW_CAPABILITY_FVLANTX (1 << 9)
813#define QLCNIC_FW_CAPABILITY_HW_LRO (1 << 10)
814
815/* module types */
816#define LINKEVENT_MODULE_NOT_PRESENT 1
817#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2
818#define LINKEVENT_MODULE_OPTICAL_SRLR 3
819#define LINKEVENT_MODULE_OPTICAL_LRM 4
820#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5
821#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6
822#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7
823#define LINKEVENT_MODULE_TWINAX 8
824
825#define LINKSPEED_10GBPS 10000
826#define LINKSPEED_1GBPS 1000
827#define LINKSPEED_100MBPS 100
828#define LINKSPEED_10MBPS 10
829
830#define LINKSPEED_ENCODED_10MBPS 0
831#define LINKSPEED_ENCODED_100MBPS 1
832#define LINKSPEED_ENCODED_1GBPS 2
833
834#define LINKEVENT_AUTONEG_DISABLED 0
835#define LINKEVENT_AUTONEG_ENABLED 1
836
837#define LINKEVENT_HALF_DUPLEX 0
838#define LINKEVENT_FULL_DUPLEX 1
839
840#define LINKEVENT_LINKSPEED_MBPS 0
841#define LINKEVENT_LINKSPEED_ENCODED 1
842
843#define AUTO_FW_RESET_ENABLED 0x01
844/* firmware response header:
845 * 63:58 - message type
846 * 57:56 - owner
847 * 55:53 - desc count
848 * 52:48 - reserved
849 * 47:40 - completion id
850 * 39:32 - opcode
851 * 31:16 - error code
852 * 15:00 - reserved
853 */
854#define qlcnic_get_nic_msg_opcode(msg_hdr) \
855 ((msg_hdr >> 32) & 0xFF)
856
/* Firmware -> driver message; hdr layout is described in the
 * "firmware response header" comment above (see qlcnic_get_nic_msg_opcode). */
struct qlcnic_fw_msg {
	union {
		struct {
			u64 hdr;
			u64 body[7];
		};
		u64 words[8];
	};
};

/* Driver -> firmware request (qhdr + request header + payload words). */
struct qlcnic_nic_req {
	__le64 qhdr;
	__le64 req_hdr;
	__le64 words[6];
};
872
/* MAC filter request payload; op is QLCNIC_MAC_NOOP/ADD/DEL. */
struct qlcnic_mac_req {
	u8 op;
	u8 tag;
	u8 mac_addr[6];		/* NOTE(review): hard-coded 6 == ETH_ALEN; hw layout, do not resize */
};
878
879#define QLCNIC_MSI_ENABLED 0x02
880#define QLCNIC_MSIX_ENABLED 0x04
881#define QLCNIC_LRO_ENABLED 0x08
882#define QLCNIC_BRIDGE_ENABLED 0X10
883#define QLCNIC_DIAG_ENABLED 0x20
884#define QLCNIC_IS_MSI_FAMILY(adapter) \
885 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
886
887#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS
888#define QLCNIC_MSIX_TBL_SPACE 8192
889#define QLCNIC_PCI_REG_MSIX_TBL 0x44
890
891#define QLCNIC_NETDEV_WEIGHT 128
892#define QLCNIC_ADAPTER_UP_MAGIC 777
893
894#define __QLCNIC_FW_ATTACHED 0
895#define __QLCNIC_DEV_UP 1
896#define __QLCNIC_RESETTING 2
897#define __QLCNIC_START_FW 4
898
899#define QLCNIC_INTERRUPT_TEST 1
900#define QLCNIC_LOOPBACK_TEST 2
901
/*
 * Per-NIC driver state; one instance per probed PCI function.
 */
struct qlcnic_adapter {
	struct qlcnic_hardware_context ahw;	/* shared hw / interrupt info */

	struct net_device *netdev;
	struct pci_dev *pdev;
	struct list_head mac_list;	/* qlcnic_mac_list_s entries; see qlcnic_free_mac_list() */

	spinlock_t tx_clean_lock;

	/* configured descriptor counts per ring type */
	u16 num_txd;
	u16 num_rxd;
	u16 num_jumbo_rxd;
	u16 num_lro_rxd;

	u8 max_rds_rings;
	u8 max_sds_rings;
	u8 driver_mismatch;
	u8 msix_supported;
	u8 rx_csum;
	u8 pci_using_dac;
	u8 portnum;
	u8 physical_port;

	u8 mc_enabled;
	u8 max_mc_count;
	u8 rss_supported;
	u8 rsrvd1;
	/* firmware watchdog / recovery counters */
	u8 fw_wait_cnt;
	u8 fw_fail_cnt;
	u8 tx_timeo_cnt;
	u8 need_fw_reset;

	u8 has_link_events;
	u8 fw_type;		/* presumably QLCNIC_*_ROMIMAGE — verify */
	u16 tx_context_id;
	u16 mtu;
	u16 is_up;		/* presumably QLCNIC_ADAPTER_UP_MAGIC when up — verify */

	/* last reported link parameters (LINKEVENT_* / LINKSPEED_* above) */
	u16 link_speed;
	u16 link_duplex;
	u16 link_autoneg;
	u16 module_type;

	u32 capabilities;	/* presumably QLCNIC_FW_CAPABILITY_* bits — verify */
	u32 flags;		/* QLCNIC_MSI_ENABLED etc.; see QLCNIC_IS_MSI_FAMILY() */
	u32 irq;
	u32 temp;

	u32 int_vec_bit;
	u32 heartbit;

	u8 dev_state;
	u8 diag_test;		/* presumably QLCNIC_INTERRUPT_TEST / QLCNIC_LOOPBACK_TEST — verify */
	u8 diag_cnt;
	u8 rsrd1;
	u16 rsrd2;

	u8 mac_addr[ETH_ALEN];

	struct qlcnic_adapter_stats stats;

	struct qlcnic_recv_context recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	void __iomem *tgt_mask_reg;
	void __iomem *tgt_status_reg;
	void __iomem *crb_int_state_reg;
	void __iomem *isr_int_vec;

	struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER];

	struct delayed_work fw_work;

	struct work_struct tx_timeout_task;

	struct qlcnic_nic_intr_coalesce coal;

	unsigned long state;	/* presumably __QLCNIC_* bit flags — verify */
	__le32 file_prd_off;	/*File fw product offset*/
	u32 fw_version;
	const struct firmware *fw;
};
984
985int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val);
986int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val);
987
988u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
989int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
990int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
991int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
992
993#define QLCRD32(adapter, off) \
994 (qlcnic_hw_read_wx_2M(adapter, off))
995#define QLCWR32(adapter, off, val) \
996 (qlcnic_hw_write_wx_2M(adapter, off, val))
997
998int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32);
999void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
1000
1001#define qlcnic_rom_lock(a) \
1002 qlcnic_pcie_sem_lock((a), 2, QLCNIC_ROM_LOCK_ID)
1003#define qlcnic_rom_unlock(a) \
1004 qlcnic_pcie_sem_unlock((a), 2)
1005#define qlcnic_phy_lock(a) \
1006 qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID)
1007#define qlcnic_phy_unlock(a) \
1008 qlcnic_pcie_sem_unlock((a), 3)
1009#define qlcnic_api_lock(a) \
1010 qlcnic_pcie_sem_lock((a), 5, 0)
1011#define qlcnic_api_unlock(a) \
1012 qlcnic_pcie_sem_unlock((a), 5)
1013#define qlcnic_sw_lock(a) \
1014 qlcnic_pcie_sem_lock((a), 6, 0)
1015#define qlcnic_sw_unlock(a) \
1016 qlcnic_pcie_sem_unlock((a), 6)
1017#define crb_win_lock(a) \
1018 qlcnic_pcie_sem_lock((a), 7, QLCNIC_CRB_WIN_LOCK_ID)
1019#define crb_win_unlock(a) \
1020 qlcnic_pcie_sem_unlock((a), 7)
1021
1022int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
1023int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
1024int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
1025
1026/* Functions from qlcnic_init.c */
1027int qlcnic_phantom_init(struct qlcnic_adapter *adapter);
1028int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
1029int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
1030void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
1031void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
1032int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
1033
1034int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp);
1035int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
1036 u8 *bytes, size_t size);
1037int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter);
1038void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter);
1039
1040void __iomem *qlcnic_get_ioaddr(struct qlcnic_adapter *, u32);
1041
1042int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter);
1043void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter);
1044
1045void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
1046void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
1047
1048int qlcnic_init_firmware(struct qlcnic_adapter *adapter);
1049void qlcnic_watchdog_task(struct work_struct *work);
1050void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
1051 struct qlcnic_host_rds_ring *rds_ring);
1052int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
1053void qlcnic_set_multi(struct net_device *netdev);
1054void qlcnic_free_mac_list(struct qlcnic_adapter *adapter);
1055int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
1056int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter);
1057int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable);
1058int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd);
1059int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable);
1060void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
1061
1062int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
1063int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
1064int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
1065int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable);
1066int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
1067void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
1068 struct qlcnic_host_tx_ring *tx_ring);
1069int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac);
1070void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter);
1071int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
1072
/* Functions from qlcnic_main.c (qlcnic_issue_cmd is implemented in qlcnic_ctx.c) */
1074int qlcnic_reset_context(struct qlcnic_adapter *);
1075u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
1076 u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd);
1077void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
1078int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
1079int qlcnic_check_loopback_buff(unsigned char *data);
1080netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
1081void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
1082
1083/*
1084 * QLOGIC Board information
1085 */
1086
#define QLCNIC_MAX_BOARD_NAME_LEN 100
/* One entry of the supported-board table, matched on PCI ids. */
struct qlcnic_brdinfo {
	unsigned short  vendor;
	unsigned short  device;
	unsigned short  sub_vendor;
	unsigned short  sub_device;
	char short_name[QLCNIC_MAX_BOARD_NAME_LEN];
};
1095
1096static const struct qlcnic_brdinfo qlcnic_boards[] = {
1097 {0x1077, 0x8020, 0x1077, 0x203,
1098 "8200 Series Single Port 10GbE Converged Network Adapter \
1099 (TCP/IP Networking)"},
1100 {0x1077, 0x8020, 0x1077, 0x207,
1101 "8200 Series Dual Port 10GbE Converged Network Adapter \
1102 (TCP/IP Networking)"},
1103 {0x1077, 0x8020, 0x1077, 0x20b,
1104 "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"},
1105 {0x1077, 0x8020, 0x1077, 0x20c,
1106 "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
1107 {0x1077, 0x8020, 0x1077, 0x20f,
1108 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
1109 {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
1110};
1111
1112#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
1113
1114static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1115{
1116 smp_mb();
1117 if (tx_ring->producer < tx_ring->sw_consumer)
1118 return tx_ring->sw_consumer - tx_ring->producer;
1119 else
1120 return tx_ring->sw_consumer + tx_ring->num_desc -
1121 tx_ring->producer;
1122}
1123
1124extern const struct ethtool_ops qlcnic_ethtool_ops;
1125
1126#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
new file mode 100644
index 000000000000..0a6a39914aec
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -0,0 +1,534 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include "qlcnic.h"
26
27static u32
28qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
29{
30 u32 rsp;
31 int timeout = 0;
32
33 do {
34 /* give atleast 1ms for firmware to respond */
35 msleep(1);
36
37 if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
38 return QLCNIC_CDRP_RSP_TIMEOUT;
39
40 rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
41 } while (!QLCNIC_CDRP_IS_RSP(rsp));
42
43 return rsp;
44}
45
/*
 * qlcnic_issue_cmd - send one CDRP mailbox command to the firmware.
 * @adapter: adapter to issue the command on
 * @pci_fn:  PCI function, folded into the mailbox signature
 * @version: HAL version (QLCHAL_VERSION)
 * @arg1, @arg2, @arg3: command-specific arguments
 * @cmd:     CDRP command code
 *
 * Serializes CRB mailbox access via qlcnic_api_lock(), writes the
 * signature and arguments, then kicks the command and polls for the
 * firmware response.
 *
 * Returns QLCNIC_RCODE_SUCCESS (0), QLCNIC_RCODE_TIMEOUT on lock or
 * response timeout, or the failure code firmware left in ARG1.
 */
u32
qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
	u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
{
	u32 rsp;
	u32 signature;
	u32 rcode = QLCNIC_RCODE_SUCCESS;
	struct pci_dev *pdev = adapter->pdev;

	signature = QLCNIC_CDRP_SIGNATURE_MAKE(pci_fn, version);

	/* Acquire semaphore before accessing CRB */
	if (qlcnic_api_lock(adapter))
		return QLCNIC_RCODE_TIMEOUT;

	QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
	QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, arg1);
	QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, arg2);
	QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, arg3);
	/* The command register is written last: this write triggers
	 * firmware processing of the arguments set up above. */
	QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd));

	rsp = qlcnic_poll_rsp(adapter);

	if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
		dev_err(&pdev->dev, "card response timeout.\n");
		rcode = QLCNIC_RCODE_TIMEOUT;
	} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
		/* On failure firmware reports its error code in ARG1. */
		rcode = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
		dev_err(&pdev->dev, "failed card response code:0x%x\n",
				rcode);
	}

	/* Release semaphore */
	qlcnic_api_unlock(adapter);

	return rcode;
}
83
84int
85qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
86{
87 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
88
89 if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
90 if (qlcnic_issue_cmd(adapter,
91 adapter->ahw.pci_func,
92 QLCHAL_VERSION,
93 recv_ctx->context_id,
94 mtu,
95 0,
96 QLCNIC_CDRP_CMD_SET_MTU)) {
97
98 dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
99 return -EIO;
100 }
101 }
102
103 return 0;
104}
105
/*
 * qlcnic_fw_cmd_create_rx_ctx - create the receive context in firmware.
 *
 * Builds a DMA-coherent host request block describing every rds
 * (receive descriptor) ring and sds (status descriptor) ring, hands its
 * physical address to firmware via CDRP, then reads back the CRB
 * register offsets firmware assigned for each ring's producer /
 * consumer / interrupt-mask and maps them into the adapter.
 *
 * Returns 0 on success or a negative errno / firmware return code.
 */
static int
qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
{
	void *addr;
	struct qlcnic_hostrq_rx_ctx *prq;
	struct qlcnic_cardrsp_rx_ctx *prsp;
	struct qlcnic_hostrq_rds_ring *prq_rds;
	struct qlcnic_hostrq_sds_ring *prq_sds;
	struct qlcnic_cardrsp_rds_ring *prsp_rds;
	struct qlcnic_cardrsp_sds_ring *prsp_sds;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;

	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
	u64 phys_addr;

	int i, nrds_rings, nsds_rings;
	size_t rq_size, rsp_size;
	u32 cap, reg, val;
	int err;

	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;

	nrds_rings = adapter->max_rds_rings;
	nsds_rings = adapter->max_sds_rings;

	/* Request/response sizes include a variable ring array tail. */
	rq_size =
		SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
						nsds_rings);
	rsp_size =
		SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
						nsds_rings);

	addr = pci_alloc_consistent(adapter->pdev,
				rq_size, &hostrq_phys_addr);
	if (addr == NULL)
		return -ENOMEM;
	prq = (struct qlcnic_hostrq_rx_ctx *)addr;

	addr = pci_alloc_consistent(adapter->pdev,
			rsp_size, &cardrsp_phys_addr);
	if (addr == NULL) {
		err = -ENOMEM;
		goto out_free_rq;
	}
	prsp = (struct qlcnic_cardrsp_rx_ctx *)addr;

	/* Tell firmware where to DMA its response block. */
	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

	cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN);
	cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);

	prq->capabilities[0] = cpu_to_le32(cap);
	prq->host_int_crb_mode =
		cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
	prq->host_rds_crb_mode =
		cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);

	prq->num_rds_rings = cpu_to_le16(nrds_rings);
	prq->num_sds_rings = cpu_to_le16(nsds_rings);
	prq->rds_ring_offset = cpu_to_le32(0);

	/* sds ring descriptors follow the rds descriptors in prq->data. */
	val = le32_to_cpu(prq->rds_ring_offset) +
		(sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
	prq->sds_ring_offset = cpu_to_le32(val);

	prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
			le32_to_cpu(prq->rds_ring_offset));

	/* Describe each host rds ring (address, size, kind, buffer size). */
	for (i = 0; i < nrds_rings; i++) {

		rds_ring = &recv_ctx->rds_rings[i];

		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
		prq_rds[i].ring_kind = cpu_to_le32(i);
		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
	}

	prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
			le32_to_cpu(prq->sds_ring_offset));

	/* Describe each host sds ring; one MSI vector index per ring. */
	for (i = 0; i < nsds_rings; i++) {

		sds_ring = &recv_ctx->sds_rings[i];

		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
		prq_sds[i].msi_index = cpu_to_le16(i);
	}

	/* The 64-bit request address is split across two 32-bit args. */
	phys_addr = hostrq_phys_addr;
	err = qlcnic_issue_cmd(adapter,
			adapter->ahw.pci_func,
			QLCHAL_VERSION,
			(u32)(phys_addr >> 32),
			(u32)(phys_addr & 0xffffffff),
			rq_size,
			QLCNIC_CDRP_CMD_CREATE_RX_CTX);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to create rx ctx in firmware%d\n", err);
		goto out_free_rsp;
	}


	/* Map the per-ring producer CRB offsets firmware handed back. */
	prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
			 &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
		rds_ring->crb_rcv_producer = qlcnic_get_ioaddr(adapter,
				QLCNIC_REG(reg - 0x200));
	}

	/* Same for the status rings' consumer and interrupt-mask CRBs. */
	prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
			&prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
		sds_ring = &recv_ctx->sds_rings[i];

		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
		sds_ring->crb_sts_consumer = qlcnic_get_ioaddr(adapter,
				QLCNIC_REG(reg - 0x200));

		reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
		sds_ring->crb_intr_mask = qlcnic_get_ioaddr(adapter,
				QLCNIC_REG(reg - 0x200));
	}

	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
	recv_ctx->virt_port = prsp->virt_port;

out_free_rsp:
	pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
out_free_rq:
	pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
	return err;
}
248
249static void
250qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
251{
252 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
253
254 if (qlcnic_issue_cmd(adapter,
255 adapter->ahw.pci_func,
256 QLCHAL_VERSION,
257 recv_ctx->context_id,
258 QLCNIC_DESTROY_CTX_RESET,
259 0,
260 QLCNIC_CDRP_CMD_DESTROY_RX_CTX)) {
261
262 dev_err(&adapter->pdev->dev,
263 "Failed to destroy rx ctx in firmware\n");
264 }
265}
266
/*
 * qlcnic_fw_cmd_create_tx_ctx - create the transmit context in firmware.
 *
 * Builds a DMA-coherent request describing the single cmd-descriptor
 * (tx) ring and its hardware-consumer index, issues it via CDRP, and on
 * success maps the producer CRB register and records the context id.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO if firmware
 * rejects the command.
 */
static int
qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hostrq_tx_ctx *prq;
	struct qlcnic_hostrq_cds_ring *prq_cds;
	struct qlcnic_cardrsp_tx_ctx *prsp;
	void *rq_addr, *rsp_addr;
	size_t rq_size, rsp_size;
	u32 temp;
	int err;
	u64 phys_addr;
	dma_addr_t rq_phys_addr, rsp_phys_addr;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;

	rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
	rq_addr = pci_alloc_consistent(adapter->pdev,
		rq_size, &rq_phys_addr);
	if (!rq_addr)
		return -ENOMEM;

	rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
	rsp_addr = pci_alloc_consistent(adapter->pdev,
		rsp_size, &rsp_phys_addr);
	if (!rsp_addr) {
		err = -ENOMEM;
		goto out_free_rq;
	}

	/* Zero both blocks so firmware never sees stale field values. */
	memset(rq_addr, 0, rq_size);
	prq = (struct qlcnic_hostrq_tx_ctx *)rq_addr;

	memset(rsp_addr, 0, rsp_size);
	prsp = (struct qlcnic_cardrsp_tx_ctx *)rsp_addr;

	prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);

	temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
					QLCNIC_CAP0_LSO);
	prq->capabilities[0] = cpu_to_le32(temp);

	prq->host_int_crb_mode =
		cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);

	prq->interrupt_ctl = 0;
	prq->msi_index = 0;
	/* Firmware DMAs the tx consumer index to this host address. */
	prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);

	prq_cds = &prq->cds_ring;

	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);

	/* The 64-bit request address is split across two 32-bit args. */
	phys_addr = rq_phys_addr;
	err = qlcnic_issue_cmd(adapter,
			adapter->ahw.pci_func,
			QLCHAL_VERSION,
			(u32)(phys_addr >> 32),
			((u32)phys_addr & 0xffffffff),
			rq_size,
			QLCNIC_CDRP_CMD_CREATE_TX_CTX);

	if (err == QLCNIC_RCODE_SUCCESS) {
		/* Map the producer CRB firmware assigned for this ring. */
		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
		tx_ring->crb_cmd_producer = qlcnic_get_ioaddr(adapter,
				QLCNIC_REG(temp - 0x200));

		adapter->tx_context_id =
			le16_to_cpu(prsp->context_id);
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to create tx ctx in firmware%d\n", err);
		err = -EIO;
	}

	pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);

out_free_rq:
	pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);

	return err;
}
348
349static void
350qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
351{
352 if (qlcnic_issue_cmd(adapter,
353 adapter->ahw.pci_func,
354 QLCHAL_VERSION,
355 adapter->tx_context_id,
356 QLCNIC_DESTROY_CTX_RESET,
357 0,
358 QLCNIC_CDRP_CMD_DESTROY_TX_CTX)) {
359
360 dev_err(&adapter->pdev->dev,
361 "Failed to destroy tx ctx in firmware\n");
362 }
363}
364
365int
366qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val)
367{
368
369 if (qlcnic_issue_cmd(adapter,
370 adapter->ahw.pci_func,
371 QLCHAL_VERSION,
372 reg,
373 0,
374 0,
375 QLCNIC_CDRP_CMD_READ_PHY)) {
376
377 return -EIO;
378 }
379
380 return QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
381}
382
383int
384qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val)
385{
386 return qlcnic_issue_cmd(adapter,
387 adapter->ahw.pci_func,
388 QLCHAL_VERSION,
389 reg,
390 val,
391 0,
392 QLCNIC_CDRP_CMD_WRITE_PHY);
393}
394
395int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
396{
397 void *addr;
398 int err;
399 int ring;
400 struct qlcnic_recv_context *recv_ctx;
401 struct qlcnic_host_rds_ring *rds_ring;
402 struct qlcnic_host_sds_ring *sds_ring;
403 struct qlcnic_host_tx_ring *tx_ring;
404
405 struct pci_dev *pdev = adapter->pdev;
406
407 recv_ctx = &adapter->recv_ctx;
408 tx_ring = adapter->tx_ring;
409
410 tx_ring->hw_consumer = (__le32 *)pci_alloc_consistent(pdev, sizeof(u32),
411 &tx_ring->hw_cons_phys_addr);
412 if (tx_ring->hw_consumer == NULL) {
413 dev_err(&pdev->dev, "failed to allocate tx consumer\n");
414 return -ENOMEM;
415 }
416 *(tx_ring->hw_consumer) = 0;
417
418 /* cmd desc ring */
419 addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
420 &tx_ring->phys_addr);
421
422 if (addr == NULL) {
423 dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
424 return -ENOMEM;
425 }
426
427 tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
428
429 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
430 rds_ring = &recv_ctx->rds_rings[ring];
431 addr = pci_alloc_consistent(adapter->pdev,
432 RCV_DESC_RINGSIZE(rds_ring),
433 &rds_ring->phys_addr);
434 if (addr == NULL) {
435 dev_err(&pdev->dev,
436 "failed to allocate rds ring [%d]\n", ring);
437 err = -ENOMEM;
438 goto err_out_free;
439 }
440 rds_ring->desc_head = (struct rcv_desc *)addr;
441
442 }
443
444 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
445 sds_ring = &recv_ctx->sds_rings[ring];
446
447 addr = pci_alloc_consistent(adapter->pdev,
448 STATUS_DESC_RINGSIZE(sds_ring),
449 &sds_ring->phys_addr);
450 if (addr == NULL) {
451 dev_err(&pdev->dev,
452 "failed to allocate sds ring [%d]\n", ring);
453 err = -ENOMEM;
454 goto err_out_free;
455 }
456 sds_ring->desc_head = (struct status_desc *)addr;
457 }
458
459
460 err = qlcnic_fw_cmd_create_rx_ctx(adapter);
461 if (err)
462 goto err_out_free;
463 err = qlcnic_fw_cmd_create_tx_ctx(adapter);
464 if (err)
465 goto err_out_free;
466
467 set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
468 return 0;
469
470err_out_free:
471 qlcnic_free_hw_resources(adapter);
472 return err;
473}
474
475void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
476{
477 struct qlcnic_recv_context *recv_ctx;
478 struct qlcnic_host_rds_ring *rds_ring;
479 struct qlcnic_host_sds_ring *sds_ring;
480 struct qlcnic_host_tx_ring *tx_ring;
481 int ring;
482
483
484 if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
485 qlcnic_fw_cmd_destroy_rx_ctx(adapter);
486 qlcnic_fw_cmd_destroy_tx_ctx(adapter);
487
488 /* Allow dma queues to drain after context reset */
489 msleep(20);
490 }
491
492 recv_ctx = &adapter->recv_ctx;
493
494 tx_ring = adapter->tx_ring;
495 if (tx_ring->hw_consumer != NULL) {
496 pci_free_consistent(adapter->pdev,
497 sizeof(u32),
498 tx_ring->hw_consumer,
499 tx_ring->hw_cons_phys_addr);
500 tx_ring->hw_consumer = NULL;
501 }
502
503 if (tx_ring->desc_head != NULL) {
504 pci_free_consistent(adapter->pdev,
505 TX_DESC_RINGSIZE(tx_ring),
506 tx_ring->desc_head, tx_ring->phys_addr);
507 tx_ring->desc_head = NULL;
508 }
509
510 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
511 rds_ring = &recv_ctx->rds_rings[ring];
512
513 if (rds_ring->desc_head != NULL) {
514 pci_free_consistent(adapter->pdev,
515 RCV_DESC_RINGSIZE(rds_ring),
516 rds_ring->desc_head,
517 rds_ring->phys_addr);
518 rds_ring->desc_head = NULL;
519 }
520 }
521
522 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
523 sds_ring = &recv_ctx->sds_rings[ring];
524
525 if (sds_ring->desc_head != NULL) {
526 pci_free_consistent(adapter->pdev,
527 STATUS_DESC_RINGSIZE(sds_ring),
528 sds_ring->desc_head,
529 sds_ring->phys_addr);
530 sds_ring->desc_head = NULL;
531 }
532 }
533}
534
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
new file mode 100644
index 000000000000..8da6ec8c13b9
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -0,0 +1,1015 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/types.h>
26#include <linux/delay.h>
27#include <linux/pci.h>
28#include <linux/io.h>
29#include <linux/netdevice.h>
30#include <linux/ethtool.h>
31
32#include "qlcnic.h"
33
/* One row of the ethtool statistics table: the name shown by
 * `ethtool -S` plus the size and byte offset of the counter within
 * struct qlcnic_adapter. */
struct qlcnic_stats {
	char stat_string[ETH_GSTRING_LEN];	/* display name */
	int sizeof_stat;	/* sizeof() the underlying counter */
	int stat_offset;	/* offsetof() the counter in the adapter */
};
39
40#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m)
41#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
42
/* Statistics exported through ethtool -S; each entry names a counter
 * in adapter->stats via its size and offset (see QLC_SIZEOF/QLC_OFF). */
static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
	{"xmit_called",
		QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)},
	{"xmit_finished",
		QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)},
	{"rx_dropped",
		QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
	{"tx_dropped",
		QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
	{"csummed",
		QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
	{"rx_pkts",
		QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
	{"lro_pkts",
		QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
	{"rx_bytes",
		QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
	{"tx_bytes",
		QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
};
63
64#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
65
/* Names of the ethtool self-test results, in execution order. */
static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register_Test_on_offline",
	"Link_Test_on_offline",
	"Interrupt_Test_offline",
	"Loopback_Test_offline"
};
72
73#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
74
75#define QLCNIC_RING_REGS_COUNT 20
76#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32))
77#define QLCNIC_MAX_EEPROM_LEN 1024
78
/* CRB registers captured for the ethtool register dump
 * (qlcnic_get_regs).  The array is terminated by the -1 sentinel,
 * which qlcnic_get_regs() uses as its loop bound. */
static const u32 diag_registers[] = {
	CRB_CMDPEG_STATE,
	CRB_RCVPEG_STATE,
	CRB_XG_STATE_P3,
	CRB_FW_CAPABILITIES_1,
	ISR_INT_STATE_REG,
	QLCNIC_CRB_DEV_REF_COUNT,
	QLCNIC_CRB_DEV_STATE,
	QLCNIC_CRB_DRV_STATE,
	QLCNIC_CRB_DRV_SCRATCH,
	QLCNIC_CRB_DEV_PARTITION_INFO,
	QLCNIC_CRB_DRV_IDC_VER,
	QLCNIC_PEG_ALIVE_COUNTER,
	QLCNIC_PEG_HALT_STATUS1,
	QLCNIC_PEG_HALT_STATUS2,
	QLCNIC_CRB_PEG_NET_0+0x3c,
	QLCNIC_CRB_PEG_NET_1+0x3c,
	QLCNIC_CRB_PEG_NET_2+0x3c,
	QLCNIC_CRB_PEG_NET_4+0x3c,
	-1
};
100
101static int qlcnic_get_regs_len(struct net_device *dev)
102{
103 return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN;
104}
105
/* Report the full flash size so `ethtool -e` can dump the whole ROM. */
static int qlcnic_get_eeprom_len(struct net_device *dev)
{
	return QLCNIC_FLASH_TOTAL_SIZE;
}
110
111static void
112qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
113{
114 struct qlcnic_adapter *adapter = netdev_priv(dev);
115 u32 fw_major, fw_minor, fw_build;
116
117 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
118 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
119 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
120 sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
121
122 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
123 strlcpy(drvinfo->driver, qlcnic_driver_name, 32);
124 strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32);
125}
126
/*
 * qlcnic_get_settings - ethtool get_settings handler.
 *
 * Reports supported/advertised modes, speed, duplex, autoneg and port
 * type based on the port type (1G vs 10G) and board model.  Returns 0
 * on success, -EIO for an unknown port or board type.
 */
static int
qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct qlcnic_adapter *adapter = netdev_priv(dev);
	int check_sfp_module = 0;
	u16 pcifn = adapter->ahw.pci_func;

	/* read which mode */
	if (adapter->ahw.port_type == QLCNIC_GBE) {
		ecmd->supported = (SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

		ecmd->advertising = (ADVERTISED_100baseT_Half |
				     ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Half |
				     ADVERTISED_1000baseT_Full);

		ecmd->speed = adapter->link_speed;
		ecmd->duplex = adapter->link_duplex;
		ecmd->autoneg = adapter->link_autoneg;

	} else if (adapter->ahw.port_type == QLCNIC_XGBE) {
		u32 val;

		/* In 802.3ap mode the 10G port runs at 1G. */
		val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
		if (val == QLCNIC_PORT_MODE_802_3_AP) {
			ecmd->supported = SUPPORTED_1000baseT_Full;
			ecmd->advertising = ADVERTISED_1000baseT_Full;
		} else {
			ecmd->supported = SUPPORTED_10000baseT_Full;
			ecmd->advertising = ADVERTISED_10000baseT_Full;
		}

		/* Prefer the link state reported by firmware events. */
		if (netif_running(dev) && adapter->has_link_events) {
			ecmd->speed = adapter->link_speed;
			ecmd->autoneg = adapter->link_autoneg;
			ecmd->duplex = adapter->link_duplex;
			goto skip;
		}

		val = QLCRD32(adapter, P3_LINK_SPEED_REG(pcifn));
		ecmd->speed = P3_LINK_SPEED_MHZ *
			P3_LINK_SPEED_VAL(pcifn, val);
		ecmd->duplex = DUPLEX_FULL;
		ecmd->autoneg = AUTONEG_DISABLE;
	} else
		return -EIO;

skip:
	ecmd->phy_address = adapter->physical_port;
	ecmd->transceiver = XCVR_EXTERNAL;

	/* Refine port/autoneg reporting per board model. */
	switch (adapter->ahw.board_type) {
	case QLCNIC_BRDTYPE_P3_REF_QG:
	case QLCNIC_BRDTYPE_P3_4_GB:
	case QLCNIC_BRDTYPE_P3_4_GB_MM:

		ecmd->supported |= SUPPORTED_Autoneg;
		ecmd->advertising |= ADVERTISED_Autoneg;
		/* fall through: these boards are also twisted-pair */
	case QLCNIC_BRDTYPE_P3_10G_CX4:
	case QLCNIC_BRDTYPE_P3_10G_CX4_LP:
	case QLCNIC_BRDTYPE_P3_10000_BASE_T:
		ecmd->supported |= SUPPORTED_TP;
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->port = PORT_TP;
		ecmd->autoneg = adapter->link_autoneg;
		break;
	case QLCNIC_BRDTYPE_P3_IMEZ:
	case QLCNIC_BRDTYPE_P3_XG_LOM:
	case QLCNIC_BRDTYPE_P3_HMEZ:
		ecmd->supported |= SUPPORTED_MII;
		ecmd->advertising |= ADVERTISED_MII;
		ecmd->port = PORT_MII;
		ecmd->autoneg = AUTONEG_DISABLE;
		break;
	case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS:
	case QLCNIC_BRDTYPE_P3_10G_SFP_CT:
	case QLCNIC_BRDTYPE_P3_10G_SFP_QT:
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->supported |= SUPPORTED_TP;
		check_sfp_module = netif_running(dev) &&
			adapter->has_link_events;
		/* fall through: SFP+ boards also report fibre */
	case QLCNIC_BRDTYPE_P3_10G_XFP:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_FIBRE;
		ecmd->autoneg = AUTONEG_DISABLE;
		break;
	case QLCNIC_BRDTYPE_P3_10G_TP:
		if (adapter->ahw.port_type == QLCNIC_XGBE) {
			ecmd->autoneg = AUTONEG_DISABLE;
			ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
			ecmd->advertising |=
				(ADVERTISED_FIBRE | ADVERTISED_TP);
			ecmd->port = PORT_FIBRE;
			check_sfp_module = netif_running(dev) &&
				adapter->has_link_events;
		} else {
			ecmd->autoneg = AUTONEG_ENABLE;
			ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
			ecmd->advertising |=
				(ADVERTISED_TP | ADVERTISED_Autoneg);
			ecmd->port = PORT_TP;
		}
		break;
	default:
		dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
			adapter->ahw.board_type);
		return -EIO;
	}

	/* Override the port type from the detected SFP module, if any. */
	if (check_sfp_module) {
		switch (adapter->module_type) {
		case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
		case LINKEVENT_MODULE_OPTICAL_SRLR:
		case LINKEVENT_MODULE_OPTICAL_LRM:
		case LINKEVENT_MODULE_OPTICAL_SFP_1G:
			ecmd->port = PORT_FIBRE;
			break;
		case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
		case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
		case LINKEVENT_MODULE_TWINAX:
			ecmd->port = PORT_TP;
			break;
		default:
			ecmd->port = PORT_OTHER;
		}
	}

	return 0;
}
262
/*
 * qlcnic_set_settings - ethtool set_settings handler.
 *
 * Only supported on 1G (GBE) ports: programs autoneg, speed and duplex
 * through the firmware PHY commands, then bounces the interface so the
 * new settings take effect.  Returns 0 on success, -EIO on firmware
 * failure, -EOPNOTSUPP on non-GBE ports.
 */
static int
qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct qlcnic_adapter *adapter = netdev_priv(dev);
	__u32 status;

	/* read which mode */
	if (adapter->ahw.port_type == QLCNIC_GBE) {
		/* autonegotiation */
		if (qlcnic_fw_cmd_set_phy(adapter,
			       QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG,
			       ecmd->autoneg) != 0)
			return -EIO;
		else
			adapter->link_autoneg = ecmd->autoneg;

		/* NOTE(review): qlcnic_fw_cmd_query_phy as written returns
		 * the raw register value rather than filling *val, so this
		 * error check and the `status` used below look suspect —
		 * confirm against the query_phy implementation. */
		if (qlcnic_fw_cmd_query_phy(adapter,
			      QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
			      &status) != 0)
			return -EIO;

		/* Encode the requested speed into the PHY status word. */
		switch (ecmd->speed) {
		case SPEED_10:
			qlcnic_set_phy_speed(status, 0);
			break;
		case SPEED_100:
			qlcnic_set_phy_speed(status, 1);
			break;
		case SPEED_1000:
			qlcnic_set_phy_speed(status, 2);
			break;
		}

		if (ecmd->duplex == DUPLEX_HALF)
			qlcnic_clear_phy_duplex(status);
		if (ecmd->duplex == DUPLEX_FULL)
			qlcnic_set_phy_duplex(status);
		if (qlcnic_fw_cmd_set_phy(adapter,
			       QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
			       *((int *)&status)) != 0)
			return -EIO;
		else {
			adapter->link_speed = ecmd->speed;
			adapter->link_duplex = ecmd->duplex;
		}
	} else
		return -EOPNOTSUPP;

	/* Restart the interface so the PHY changes take effect. */
	if (!netif_running(dev))
		return 0;

	dev->netdev_ops->ndo_stop(dev);
	return dev->netdev_ops->ndo_open(dev);
}
317
/*
 * qlcnic_get_regs - ethtool register dump (ethtool -d).
 *
 * Dumps the diag_registers[] CRB list; when the adapter is up, appends
 * a marker word followed by tx/rx/sds ring producer/consumer state.
 */
static void
qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
	struct qlcnic_adapter *adapter = netdev_priv(dev);
	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
	struct qlcnic_host_sds_ring *sds_ring;
	u32 *regs_buff = p;
	int ring, i = 0;

	memset(p, 0, qlcnic_get_regs_len(dev));
	/* Version word: dump format, chip revision, PCI device id. */
	regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
	    (adapter->pdev)->device;

	/* diag_registers[] is terminated by a -1 sentinel. */
	for (i = 0; diag_registers[i] != -1; i++)
		regs_buff[i] = QLCRD32(adapter, diag_registers[i]);

	/* Ring state is only meaningful once the adapter is up. */
	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/

	regs_buff[i++] = 1; /* No. of tx ring */
	regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
	regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer);

	regs_buff[i++] = 2; /* No. of rx ring */
	regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer);
	regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer);

	regs_buff[i++] = adapter->max_sds_rings;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &(recv_ctx->sds_rings[ring]);
		regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
	}
}
354
355static u32 qlcnic_test_link(struct net_device *dev)
356{
357 struct qlcnic_adapter *adapter = netdev_priv(dev);
358 u32 val;
359
360 val = QLCRD32(adapter, CRB_XG_STATE_P3);
361 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
362 return (val == XG_LINK_UP_P3) ? 0 : 1;
363}
364
365static int
366qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
367 u8 *bytes)
368{
369 struct qlcnic_adapter *adapter = netdev_priv(dev);
370 int offset;
371 int ret;
372
373 if (eeprom->len == 0)
374 return -EINVAL;
375
376 eeprom->magic = (adapter->pdev)->vendor |
377 ((adapter->pdev)->device << 16);
378 offset = eeprom->offset;
379
380 ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
381 eeprom->len);
382 if (ret < 0)
383 return ret;
384
385 return 0;
386}
387
388static void
389qlcnic_get_ringparam(struct net_device *dev,
390 struct ethtool_ringparam *ring)
391{
392 struct qlcnic_adapter *adapter = netdev_priv(dev);
393
394 ring->rx_pending = adapter->num_rxd;
395 ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
396 ring->rx_jumbo_pending += adapter->num_lro_rxd;
397 ring->tx_pending = adapter->num_txd;
398
399 if (adapter->ahw.port_type == QLCNIC_GBE) {
400 ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
401 ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
402 } else {
403 ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
404 ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
405 }
406
407 ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
408
409 ring->rx_mini_max_pending = 0;
410 ring->rx_mini_pending = 0;
411}
412
413static u32
414qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
415{
416 u32 num_desc;
417 num_desc = max(val, min);
418 num_desc = min(num_desc, max);
419 num_desc = roundup_pow_of_two(num_desc);
420
421 if (val != num_desc) {
422 printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n",
423 qlcnic_driver_name, r_name, num_desc, val);
424 }
425
426 return num_desc;
427}
428
429static int
430qlcnic_set_ringparam(struct net_device *dev,
431 struct ethtool_ringparam *ring)
432{
433 struct qlcnic_adapter *adapter = netdev_priv(dev);
434 u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
435 u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
436 u16 num_rxd, num_jumbo_rxd, num_txd;
437
438
439 if (ring->rx_mini_pending)
440 return -EOPNOTSUPP;
441
442 if (adapter->ahw.port_type == QLCNIC_GBE) {
443 max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
444 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
445 }
446
447 num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
448 MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
449
450 num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
451 MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
452
453 num_txd = qlcnic_validate_ringparam(ring->tx_pending,
454 MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
455
456 if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd &&
457 num_jumbo_rxd == adapter->num_jumbo_rxd)
458 return 0;
459
460 adapter->num_rxd = num_rxd;
461 adapter->num_jumbo_rxd = num_jumbo_rxd;
462 adapter->num_txd = num_txd;
463
464 return qlcnic_reset_context(adapter);
465}
466
467static void
468qlcnic_get_pauseparam(struct net_device *netdev,
469 struct ethtool_pauseparam *pause)
470{
471 struct qlcnic_adapter *adapter = netdev_priv(netdev);
472 int port = adapter->physical_port;
473 __u32 val;
474
475 if (adapter->ahw.port_type == QLCNIC_GBE) {
476 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
477 return;
478 /* get flow control settings */
479 val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
480 pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
481 val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
482 switch (port) {
483 case 0:
484 pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
485 break;
486 case 1:
487 pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val));
488 break;
489 case 2:
490 pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val));
491 break;
492 case 3:
493 default:
494 pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val));
495 break;
496 }
497 } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
498 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
499 return;
500 pause->rx_pause = 1;
501 val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
502 if (port == 0)
503 pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
504 else
505 pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val));
506 } else {
507 dev_err(&netdev->dev, "Unknown board type: %x\n",
508 adapter->ahw.port_type);
509 }
510}
511
512static int
513qlcnic_set_pauseparam(struct net_device *netdev,
514 struct ethtool_pauseparam *pause)
515{
516 struct qlcnic_adapter *adapter = netdev_priv(netdev);
517 int port = adapter->physical_port;
518 __u32 val;
519
520 /* read mode */
521 if (adapter->ahw.port_type == QLCNIC_GBE) {
522 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
523 return -EIO;
524 /* set flow control */
525 val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
526
527 if (pause->rx_pause)
528 qlcnic_gb_rx_flowctl(val);
529 else
530 qlcnic_gb_unset_rx_flowctl(val);
531
532 QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port),
533 val);
534 /* set autoneg */
535 val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
536 switch (port) {
537 case 0:
538 if (pause->tx_pause)
539 qlcnic_gb_unset_gb0_mask(val);
540 else
541 qlcnic_gb_set_gb0_mask(val);
542 break;
543 case 1:
544 if (pause->tx_pause)
545 qlcnic_gb_unset_gb1_mask(val);
546 else
547 qlcnic_gb_set_gb1_mask(val);
548 break;
549 case 2:
550 if (pause->tx_pause)
551 qlcnic_gb_unset_gb2_mask(val);
552 else
553 qlcnic_gb_set_gb2_mask(val);
554 break;
555 case 3:
556 default:
557 if (pause->tx_pause)
558 qlcnic_gb_unset_gb3_mask(val);
559 else
560 qlcnic_gb_set_gb3_mask(val);
561 break;
562 }
563 QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val);
564 } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
565 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
566 return -EIO;
567 val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
568 if (port == 0) {
569 if (pause->tx_pause)
570 qlcnic_xg_unset_xg0_mask(val);
571 else
572 qlcnic_xg_set_xg0_mask(val);
573 } else {
574 if (pause->tx_pause)
575 qlcnic_xg_unset_xg1_mask(val);
576 else
577 qlcnic_xg_set_xg1_mask(val);
578 }
579 QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val);
580 } else {
581 dev_err(&netdev->dev, "Unknown board type: %x\n",
582 adapter->ahw.port_type);
583 }
584 return 0;
585}
586
587static int qlcnic_reg_test(struct net_device *dev)
588{
589 struct qlcnic_adapter *adapter = netdev_priv(dev);
590 u32 data_read, data_written;
591
592 data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
593 if ((data_read & 0xffff) != adapter->pdev->vendor)
594 return 1;
595
596 data_written = (u32)0xa5a5a5a5;
597
598 QLCWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
599 data_read = QLCRD32(adapter, CRB_SCRATCHPAD_TEST);
600 if (data_written != data_read)
601 return 1;
602
603 return 0;
604}
605
606static int qlcnic_get_sset_count(struct net_device *dev, int sset)
607{
608 switch (sset) {
609 case ETH_SS_TEST:
610 return QLCNIC_TEST_LEN;
611 case ETH_SS_STATS:
612 return QLCNIC_STATS_LEN;
613 default:
614 return -EOPNOTSUPP;
615 }
616}
617
#define QLC_ILB_PKT_SIZE 64

/*
 * Fill a QLC_ILB_PKT_SIZE byte buffer with the internal-loopback test
 * pattern: 12 bytes of 0xff, a 4-byte signature (a8 06 45 00), and the
 * remainder set to 0x4e.
 */
static void qlcnic_create_loopback_buff(unsigned char *data)
{
	unsigned char sig[] = {0xa8, 0x06, 0x45, 0x00};

	memset(data, 0x4e, QLC_ILB_PKT_SIZE);
	memset(data, 0xff, 12);
	memcpy(data + 12, sig, sizeof(sig));
}
627
628int qlcnic_check_loopback_buff(unsigned char *data)
629{
630 unsigned char buff[QLC_ILB_PKT_SIZE];
631 qlcnic_create_loopback_buff(buff);
632 return memcmp(data, buff, QLC_ILB_PKT_SIZE);
633}
634
635static int qlcnic_do_ilb_test(struct qlcnic_adapter *adapter)
636{
637 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
638 struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
639 struct sk_buff *skb;
640 int i;
641
642 for (i = 0; i < 16; i++) {
643 skb = dev_alloc_skb(QLC_ILB_PKT_SIZE);
644 qlcnic_create_loopback_buff(skb->data);
645 skb_put(skb, QLC_ILB_PKT_SIZE);
646
647 adapter->diag_cnt = 0;
648
649 qlcnic_xmit_frame(skb, adapter->netdev);
650
651 msleep(5);
652
653 qlcnic_process_rcv_ring_diag(sds_ring);
654
655 dev_kfree_skb_any(skb);
656 if (!adapter->diag_cnt)
657 return -1;
658 }
659 return 0;
660}
661
662static int qlcnic_loopback_test(struct net_device *netdev)
663{
664 struct qlcnic_adapter *adapter = netdev_priv(netdev);
665 int max_sds_rings = adapter->max_sds_rings;
666 int ret;
667
668 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
669 return -EIO;
670
671 ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
672 if (ret)
673 goto clear_it;
674
675 ret = qlcnic_set_ilb_mode(adapter);
676 if (ret)
677 goto done;
678
679 ret = qlcnic_do_ilb_test(adapter);
680
681 qlcnic_clear_ilb_mode(adapter);
682
683done:
684 qlcnic_diag_free_res(netdev, max_sds_rings);
685
686clear_it:
687 adapter->max_sds_rings = max_sds_rings;
688 clear_bit(__QLCNIC_RESETTING, &adapter->state);
689 return ret;
690}
691
692static int qlcnic_irq_test(struct net_device *netdev)
693{
694 struct qlcnic_adapter *adapter = netdev_priv(netdev);
695 int max_sds_rings = adapter->max_sds_rings;
696 int ret;
697
698 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
699 return -EIO;
700
701 ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
702 if (ret)
703 goto clear_it;
704
705 adapter->diag_cnt = 0;
706 ret = qlcnic_issue_cmd(adapter, adapter->ahw.pci_func,
707 QLCHAL_VERSION, adapter->portnum, 0, 0, 0x00000011);
708 if (ret)
709 goto done;
710
711 msleep(10);
712
713 ret = !adapter->diag_cnt;
714
715done:
716 qlcnic_diag_free_res(netdev, max_sds_rings);
717
718clear_it:
719 adapter->max_sds_rings = max_sds_rings;
720 clear_bit(__QLCNIC_RESETTING, &adapter->state);
721 return ret;
722}
723
724static void
725qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
726 u64 *data)
727{
728 memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN);
729
730 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
731 data[2] = qlcnic_irq_test(dev);
732 if (data[2])
733 eth_test->flags |= ETH_TEST_FL_FAILED;
734
735 data[3] = qlcnic_loopback_test(dev);
736 if (data[3])
737 eth_test->flags |= ETH_TEST_FL_FAILED;
738
739 }
740
741 data[0] = qlcnic_reg_test(dev);
742 if (data[0])
743 eth_test->flags |= ETH_TEST_FL_FAILED;
744
745 /* link test */
746 data[1] = (u64) qlcnic_test_link(dev);
747 if (data[1])
748 eth_test->flags |= ETH_TEST_FL_FAILED;
749}
750
751static void
752qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
753{
754 int index;
755
756 switch (stringset) {
757 case ETH_SS_TEST:
758 memcpy(data, *qlcnic_gstrings_test,
759 QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
760 break;
761 case ETH_SS_STATS:
762 for (index = 0; index < QLCNIC_STATS_LEN; index++) {
763 memcpy(data + index * ETH_GSTRING_LEN,
764 qlcnic_gstrings_stats[index].stat_string,
765 ETH_GSTRING_LEN);
766 }
767 break;
768 }
769}
770
771static void
772qlcnic_get_ethtool_stats(struct net_device *dev,
773 struct ethtool_stats *stats, u64 * data)
774{
775 struct qlcnic_adapter *adapter = netdev_priv(dev);
776 int index;
777
778 for (index = 0; index < QLCNIC_STATS_LEN; index++) {
779 char *p =
780 (char *)adapter +
781 qlcnic_gstrings_stats[index].stat_offset;
782 data[index] =
783 (qlcnic_gstrings_stats[index].sizeof_stat ==
784 sizeof(u64)) ? *(u64 *)p:(*(u32 *)p);
785 }
786}
787
788static u32 qlcnic_get_rx_csum(struct net_device *dev)
789{
790 struct qlcnic_adapter *adapter = netdev_priv(dev);
791 return adapter->rx_csum;
792}
793
794static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
795{
796 struct qlcnic_adapter *adapter = netdev_priv(dev);
797 adapter->rx_csum = !!data;
798 return 0;
799}
800
801static u32 qlcnic_get_tso(struct net_device *dev)
802{
803 return (dev->features & (NETIF_F_TSO | NETIF_F_TSO6)) != 0;
804}
805
806static int qlcnic_set_tso(struct net_device *dev, u32 data)
807{
808 if (data)
809 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
810 else
811 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
812
813 return 0;
814}
815
816static int qlcnic_blink_led(struct net_device *dev, u32 val)
817{
818 struct qlcnic_adapter *adapter = netdev_priv(dev);
819 int ret;
820
821 ret = qlcnic_config_led(adapter, 1, 0xf);
822 if (ret) {
823 dev_err(&adapter->pdev->dev,
824 "Failed to set LED blink state.\n");
825 return ret;
826 }
827
828 msleep_interruptible(val * 1000);
829
830 ret = qlcnic_config_led(adapter, 0, 0xf);
831 if (ret) {
832 dev_err(&adapter->pdev->dev,
833 "Failed to reset LED blink state.\n");
834 return ret;
835 }
836
837 return 0;
838}
839
840static void
841qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
842{
843 struct qlcnic_adapter *adapter = netdev_priv(dev);
844 u32 wol_cfg;
845
846 wol->supported = 0;
847 wol->wolopts = 0;
848
849 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
850 if (wol_cfg & (1UL << adapter->portnum))
851 wol->supported |= WAKE_MAGIC;
852
853 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
854 if (wol_cfg & (1UL << adapter->portnum))
855 wol->wolopts |= WAKE_MAGIC;
856}
857
858static int
859qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
860{
861 struct qlcnic_adapter *adapter = netdev_priv(dev);
862 u32 wol_cfg;
863
864 if (wol->wolopts & ~WAKE_MAGIC)
865 return -EOPNOTSUPP;
866
867 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
868 if (!(wol_cfg & (1 << adapter->portnum)))
869 return -EOPNOTSUPP;
870
871 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
872 if (wol->wolopts & WAKE_MAGIC)
873 wol_cfg |= 1UL << adapter->portnum;
874 else
875 wol_cfg &= ~(1UL << adapter->portnum);
876
877 QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg);
878
879 return 0;
880}
881
882/*
883 * Set the coalescing parameters. Currently only normal is supported.
884 * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the
885 * firmware coalescing to default.
886 */
static int qlcnic_set_intr_coalesce(struct net_device *netdev,
			struct ethtool_coalesce *ethcoal)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);

	/* coalescing can only be configured once the adapter is fully up */
	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return -EINVAL;

	/*
	* Return Error if unsupported values or
	* unsupported parameters are set.
	*/
	/* rx/tx usecs and frame counts are 16-bit in firmware; every other
	 * ethtool_coalesce knob (irq, adaptive, per-rate low/high) is
	 * unsupported and must be zero. */
	if (ethcoal->rx_coalesce_usecs > 0xffff ||
		ethcoal->rx_max_coalesced_frames > 0xffff ||
		ethcoal->tx_coalesce_usecs > 0xffff ||
		ethcoal->tx_max_coalesced_frames > 0xffff ||
		ethcoal->rx_coalesce_usecs_irq ||
		ethcoal->rx_max_coalesced_frames_irq ||
		ethcoal->tx_coalesce_usecs_irq ||
		ethcoal->tx_max_coalesced_frames_irq ||
		ethcoal->stats_block_coalesce_usecs ||
		ethcoal->use_adaptive_rx_coalesce ||
		ethcoal->use_adaptive_tx_coalesce ||
		ethcoal->pkt_rate_low ||
		ethcoal->rx_coalesce_usecs_low ||
		ethcoal->rx_max_coalesced_frames_low ||
		ethcoal->tx_coalesce_usecs_low ||
		ethcoal->tx_max_coalesced_frames_low ||
		ethcoal->pkt_rate_high ||
		ethcoal->rx_coalesce_usecs_high ||
		ethcoal->rx_max_coalesced_frames_high ||
		ethcoal->tx_coalesce_usecs_high ||
		ethcoal->tx_max_coalesced_frames_high)
		return -EINVAL;

	/* zero rx usecs or frames selects the firmware defaults (see the
	 * function comment above this block) */
	if (!ethcoal->rx_coalesce_usecs ||
		!ethcoal->rx_max_coalesced_frames) {
		adapter->coal.flags = QLCNIC_INTR_DEFAULT;
		adapter->coal.normal.data.rx_time_us =
			QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
		adapter->coal.normal.data.rx_packets =
			QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
	} else {
		adapter->coal.flags = 0;
		adapter->coal.normal.data.rx_time_us =
			ethcoal->rx_coalesce_usecs;
		adapter->coal.normal.data.rx_packets =
			ethcoal->rx_max_coalesced_frames;
	}
	adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs;
	adapter->coal.normal.data.tx_packets =
		ethcoal->tx_max_coalesced_frames;

	/* push the new parameters to firmware */
	qlcnic_config_intr_coalesce(adapter);

	return 0;
}
944
945static int qlcnic_get_intr_coalesce(struct net_device *netdev,
946 struct ethtool_coalesce *ethcoal)
947{
948 struct qlcnic_adapter *adapter = netdev_priv(netdev);
949
950 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
951 return -EINVAL;
952
953 ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us;
954 ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us;
955 ethcoal->rx_max_coalesced_frames =
956 adapter->coal.normal.data.rx_packets;
957 ethcoal->tx_max_coalesced_frames =
958 adapter->coal.normal.data.tx_packets;
959
960 return 0;
961}
962
963static int qlcnic_set_flags(struct net_device *netdev, u32 data)
964{
965 struct qlcnic_adapter *adapter = netdev_priv(netdev);
966 int hw_lro;
967
968 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
969 return -EINVAL;
970
971 ethtool_op_set_flags(netdev, data);
972
973 hw_lro = (data & ETH_FLAG_LRO) ? QLCNIC_LRO_ENABLED : 0;
974
975 if (qlcnic_config_hw_lro(adapter, hw_lro))
976 return -EIO;
977
978 if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
979 return -EIO;
980
981
982 return 0;
983}
984
/* ethtool operations table for qlcnic devices; generic ethtool_op_*
 * helpers are used where no hardware-specific handling is needed. */
const struct ethtool_ops qlcnic_ethtool_ops = {
	.get_settings = qlcnic_get_settings,
	.set_settings = qlcnic_set_settings,
	.get_drvinfo = qlcnic_get_drvinfo,
	.get_regs_len = qlcnic_get_regs_len,
	.get_regs = qlcnic_get_regs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = qlcnic_get_eeprom_len,
	.get_eeprom = qlcnic_get_eeprom,
	.get_ringparam = qlcnic_get_ringparam,
	.set_ringparam = qlcnic_set_ringparam,
	.get_pauseparam = qlcnic_get_pauseparam,
	.set_pauseparam = qlcnic_set_pauseparam,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_tso = qlcnic_get_tso,
	.set_tso = qlcnic_set_tso,
	.get_wol = qlcnic_get_wol,
	.set_wol = qlcnic_set_wol,
	.self_test = qlcnic_diag_test,
	.get_strings = qlcnic_get_strings,
	.get_ethtool_stats = qlcnic_get_ethtool_stats,
	.get_sset_count = qlcnic_get_sset_count,
	.get_rx_csum = qlcnic_get_rx_csum,
	.set_rx_csum = qlcnic_set_rx_csum,
	.get_coalesce = qlcnic_get_intr_coalesce,
	.set_coalesce = qlcnic_set_intr_coalesce,
	.get_flags = ethtool_op_get_flags,
	.set_flags = qlcnic_set_flags,
	.phys_id = qlcnic_blink_led,
};
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
new file mode 100644
index 000000000000..0469f84360a4
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -0,0 +1,937 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#ifndef __QLCNIC_HDR_H_
26#define __QLCNIC_HDR_H_
27
28#include <linux/kernel.h>
29#include <linux/types.h>
30
31/*
32 * The basic unit of access when reading/writing control registers.
33 */
34
/* CRB hub selectors; each is combined with a 7-bit agent address as
 * (hub << 7) | agent by the QLCNIC_HW_CRB_HUB_AGT_ADR_* macros below. */
enum {
	QLCNIC_HW_H0_CH_HUB_ADR = 0x05,
	QLCNIC_HW_H1_CH_HUB_ADR = 0x0E,
	QLCNIC_HW_H2_CH_HUB_ADR = 0x03,
	QLCNIC_HW_H3_CH_HUB_ADR = 0x01,
	QLCNIC_HW_H4_CH_HUB_ADR = 0x06,
	QLCNIC_HW_H5_CH_HUB_ADR = 0x07,
	QLCNIC_HW_H6_CH_HUB_ADR = 0x08
};
44
45/* Hub 0 */
46enum {
47 QLCNIC_HW_MN_CRB_AGT_ADR = 0x15,
48 QLCNIC_HW_MS_CRB_AGT_ADR = 0x25
49};
50
51/* Hub 1 */
52enum {
53 QLCNIC_HW_PS_CRB_AGT_ADR = 0x73,
54 QLCNIC_HW_SS_CRB_AGT_ADR = 0x20,
55 QLCNIC_HW_RPMX3_CRB_AGT_ADR = 0x0b,
56 QLCNIC_HW_QMS_CRB_AGT_ADR = 0x00,
57 QLCNIC_HW_SQGS0_CRB_AGT_ADR = 0x01,
58 QLCNIC_HW_SQGS1_CRB_AGT_ADR = 0x02,
59 QLCNIC_HW_SQGS2_CRB_AGT_ADR = 0x03,
60 QLCNIC_HW_SQGS3_CRB_AGT_ADR = 0x04,
61 QLCNIC_HW_C2C0_CRB_AGT_ADR = 0x58,
62 QLCNIC_HW_C2C1_CRB_AGT_ADR = 0x59,
63 QLCNIC_HW_C2C2_CRB_AGT_ADR = 0x5a,
64 QLCNIC_HW_RPMX2_CRB_AGT_ADR = 0x0a,
65 QLCNIC_HW_RPMX4_CRB_AGT_ADR = 0x0c,
66 QLCNIC_HW_RPMX7_CRB_AGT_ADR = 0x0f,
67 QLCNIC_HW_RPMX9_CRB_AGT_ADR = 0x12,
68 QLCNIC_HW_SMB_CRB_AGT_ADR = 0x18
69};
70
71/* Hub 2 */
72enum {
73 QLCNIC_HW_NIU_CRB_AGT_ADR = 0x31,
74 QLCNIC_HW_I2C0_CRB_AGT_ADR = 0x19,
75 QLCNIC_HW_I2C1_CRB_AGT_ADR = 0x29,
76
77 QLCNIC_HW_SN_CRB_AGT_ADR = 0x10,
78 QLCNIC_HW_I2Q_CRB_AGT_ADR = 0x20,
79 QLCNIC_HW_LPC_CRB_AGT_ADR = 0x22,
80 QLCNIC_HW_ROMUSB_CRB_AGT_ADR = 0x21,
81 QLCNIC_HW_QM_CRB_AGT_ADR = 0x66,
82 QLCNIC_HW_SQG0_CRB_AGT_ADR = 0x60,
83 QLCNIC_HW_SQG1_CRB_AGT_ADR = 0x61,
84 QLCNIC_HW_SQG2_CRB_AGT_ADR = 0x62,
85 QLCNIC_HW_SQG3_CRB_AGT_ADR = 0x63,
86 QLCNIC_HW_RPMX1_CRB_AGT_ADR = 0x09,
87 QLCNIC_HW_RPMX5_CRB_AGT_ADR = 0x0d,
88 QLCNIC_HW_RPMX6_CRB_AGT_ADR = 0x0e,
89 QLCNIC_HW_RPMX8_CRB_AGT_ADR = 0x11
90};
91
92/* Hub 3 */
93enum {
94 QLCNIC_HW_PH_CRB_AGT_ADR = 0x1A,
95 QLCNIC_HW_SRE_CRB_AGT_ADR = 0x50,
96 QLCNIC_HW_EG_CRB_AGT_ADR = 0x51,
97 QLCNIC_HW_RPMX0_CRB_AGT_ADR = 0x08
98};
99
/* Hub 4 */
/* Agent addresses on hub 4; entries after PEGN0 take consecutive
 * values (0x41, 0x42, ...) via implicit enum increment. */
enum {
	QLCNIC_HW_PEGN0_CRB_AGT_ADR = 0x40,
	QLCNIC_HW_PEGN1_CRB_AGT_ADR,
	QLCNIC_HW_PEGN2_CRB_AGT_ADR,
	QLCNIC_HW_PEGN3_CRB_AGT_ADR,
	QLCNIC_HW_PEGNI_CRB_AGT_ADR,
	QLCNIC_HW_PEGND_CRB_AGT_ADR,
	QLCNIC_HW_PEGNC_CRB_AGT_ADR,
	QLCNIC_HW_PEGR0_CRB_AGT_ADR,
	QLCNIC_HW_PEGR1_CRB_AGT_ADR,
	QLCNIC_HW_PEGR2_CRB_AGT_ADR,
	QLCNIC_HW_PEGR3_CRB_AGT_ADR,
	QLCNIC_HW_PEGN4_CRB_AGT_ADR
};
115
116/* Hub 5 */
117enum {
118 QLCNIC_HW_PEGS0_CRB_AGT_ADR = 0x40,
119 QLCNIC_HW_PEGS1_CRB_AGT_ADR,
120 QLCNIC_HW_PEGS2_CRB_AGT_ADR,
121 QLCNIC_HW_PEGS3_CRB_AGT_ADR,
122 QLCNIC_HW_PEGSI_CRB_AGT_ADR,
123 QLCNIC_HW_PEGSD_CRB_AGT_ADR,
124 QLCNIC_HW_PEGSC_CRB_AGT_ADR
125};
126
127/* Hub 6 */
128enum {
129 QLCNIC_HW_CAS0_CRB_AGT_ADR = 0x46,
130 QLCNIC_HW_CAS1_CRB_AGT_ADR = 0x47,
131 QLCNIC_HW_CAS2_CRB_AGT_ADR = 0x48,
132 QLCNIC_HW_CAS3_CRB_AGT_ADR = 0x49,
133 QLCNIC_HW_NCM_CRB_AGT_ADR = 0x16,
134 QLCNIC_HW_TMR_CRB_AGT_ADR = 0x17,
135 QLCNIC_HW_XDMA_CRB_AGT_ADR = 0x05,
136 QLCNIC_HW_OCM0_CRB_AGT_ADR = 0x06,
137 QLCNIC_HW_OCM1_CRB_AGT_ADR = 0x07
138};
139
140/* Floaters - non existent modules */
141#define QLCNIC_HW_EFC_RPMX0_CRB_AGT_ADR 0x67
142
143/* This field defines PCI/X adr [25:20] of agents on the CRB */
144enum {
145 QLCNIC_HW_PX_MAP_CRB_PH = 0,
146 QLCNIC_HW_PX_MAP_CRB_PS,
147 QLCNIC_HW_PX_MAP_CRB_MN,
148 QLCNIC_HW_PX_MAP_CRB_MS,
149 QLCNIC_HW_PX_MAP_CRB_PGR1,
150 QLCNIC_HW_PX_MAP_CRB_SRE,
151 QLCNIC_HW_PX_MAP_CRB_NIU,
152 QLCNIC_HW_PX_MAP_CRB_QMN,
153 QLCNIC_HW_PX_MAP_CRB_SQN0,
154 QLCNIC_HW_PX_MAP_CRB_SQN1,
155 QLCNIC_HW_PX_MAP_CRB_SQN2,
156 QLCNIC_HW_PX_MAP_CRB_SQN3,
157 QLCNIC_HW_PX_MAP_CRB_QMS,
158 QLCNIC_HW_PX_MAP_CRB_SQS0,
159 QLCNIC_HW_PX_MAP_CRB_SQS1,
160 QLCNIC_HW_PX_MAP_CRB_SQS2,
161 QLCNIC_HW_PX_MAP_CRB_SQS3,
162 QLCNIC_HW_PX_MAP_CRB_PGN0,
163 QLCNIC_HW_PX_MAP_CRB_PGN1,
164 QLCNIC_HW_PX_MAP_CRB_PGN2,
165 QLCNIC_HW_PX_MAP_CRB_PGN3,
166 QLCNIC_HW_PX_MAP_CRB_PGND,
167 QLCNIC_HW_PX_MAP_CRB_PGNI,
168 QLCNIC_HW_PX_MAP_CRB_PGS0,
169 QLCNIC_HW_PX_MAP_CRB_PGS1,
170 QLCNIC_HW_PX_MAP_CRB_PGS2,
171 QLCNIC_HW_PX_MAP_CRB_PGS3,
172 QLCNIC_HW_PX_MAP_CRB_PGSD,
173 QLCNIC_HW_PX_MAP_CRB_PGSI,
174 QLCNIC_HW_PX_MAP_CRB_SN,
175 QLCNIC_HW_PX_MAP_CRB_PGR2,
176 QLCNIC_HW_PX_MAP_CRB_EG,
177 QLCNIC_HW_PX_MAP_CRB_PH2,
178 QLCNIC_HW_PX_MAP_CRB_PS2,
179 QLCNIC_HW_PX_MAP_CRB_CAM,
180 QLCNIC_HW_PX_MAP_CRB_CAS0,
181 QLCNIC_HW_PX_MAP_CRB_CAS1,
182 QLCNIC_HW_PX_MAP_CRB_CAS2,
183 QLCNIC_HW_PX_MAP_CRB_C2C0,
184 QLCNIC_HW_PX_MAP_CRB_C2C1,
185 QLCNIC_HW_PX_MAP_CRB_TIMR,
186 QLCNIC_HW_PX_MAP_CRB_PGR3,
187 QLCNIC_HW_PX_MAP_CRB_RPMX1,
188 QLCNIC_HW_PX_MAP_CRB_RPMX2,
189 QLCNIC_HW_PX_MAP_CRB_RPMX3,
190 QLCNIC_HW_PX_MAP_CRB_RPMX4,
191 QLCNIC_HW_PX_MAP_CRB_RPMX5,
192 QLCNIC_HW_PX_MAP_CRB_RPMX6,
193 QLCNIC_HW_PX_MAP_CRB_RPMX7,
194 QLCNIC_HW_PX_MAP_CRB_XDMA,
195 QLCNIC_HW_PX_MAP_CRB_I2Q,
196 QLCNIC_HW_PX_MAP_CRB_ROMUSB,
197 QLCNIC_HW_PX_MAP_CRB_CAS3,
198 QLCNIC_HW_PX_MAP_CRB_RPMX0,
199 QLCNIC_HW_PX_MAP_CRB_RPMX8,
200 QLCNIC_HW_PX_MAP_CRB_RPMX9,
201 QLCNIC_HW_PX_MAP_CRB_OCM0,
202 QLCNIC_HW_PX_MAP_CRB_OCM1,
203 QLCNIC_HW_PX_MAP_CRB_SMB,
204 QLCNIC_HW_PX_MAP_CRB_I2C0,
205 QLCNIC_HW_PX_MAP_CRB_I2C1,
206 QLCNIC_HW_PX_MAP_CRB_LPC,
207 QLCNIC_HW_PX_MAP_CRB_PGNC,
208 QLCNIC_HW_PX_MAP_CRB_PGR0
209};
210
211/* This field defines CRB adr [31:20] of the agents */
212
213#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \
214 ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR)
215#define QLCNIC_HW_CRB_HUB_AGT_ADR_PH \
216 ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_PH_CRB_AGT_ADR)
217#define QLCNIC_HW_CRB_HUB_AGT_ADR_MS \
218 ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MS_CRB_AGT_ADR)
219
220#define QLCNIC_HW_CRB_HUB_AGT_ADR_PS \
221 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_PS_CRB_AGT_ADR)
222#define QLCNIC_HW_CRB_HUB_AGT_ADR_SS \
223 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SS_CRB_AGT_ADR)
224#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3 \
225 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX3_CRB_AGT_ADR)
226#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMS \
227 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_QMS_CRB_AGT_ADR)
228#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS0 \
229 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS0_CRB_AGT_ADR)
230#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS1 \
231 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS1_CRB_AGT_ADR)
232#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS2 \
233 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS2_CRB_AGT_ADR)
234#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS3 \
235 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS3_CRB_AGT_ADR)
236#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C0 \
237 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C0_CRB_AGT_ADR)
238#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C1 \
239 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C1_CRB_AGT_ADR)
240#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2 \
241 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX2_CRB_AGT_ADR)
242#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4 \
243 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX4_CRB_AGT_ADR)
244#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7 \
245 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX7_CRB_AGT_ADR)
246#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9 \
247 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX9_CRB_AGT_ADR)
248#define QLCNIC_HW_CRB_HUB_AGT_ADR_SMB \
249 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SMB_CRB_AGT_ADR)
250
251#define QLCNIC_HW_CRB_HUB_AGT_ADR_NIU \
252 ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR)
253#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0 \
254 ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C0_CRB_AGT_ADR)
255#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1 \
256 ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C1_CRB_AGT_ADR)
257
258#define QLCNIC_HW_CRB_HUB_AGT_ADR_SRE \
259 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SRE_CRB_AGT_ADR)
260#define QLCNIC_HW_CRB_HUB_AGT_ADR_EG \
261 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_EG_CRB_AGT_ADR)
262#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0 \
263 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX0_CRB_AGT_ADR)
264#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMN \
265 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_QM_CRB_AGT_ADR)
266#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0 \
267 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG0_CRB_AGT_ADR)
268#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1 \
269 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG1_CRB_AGT_ADR)
270#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2 \
271 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG2_CRB_AGT_ADR)
272#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3 \
273 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG3_CRB_AGT_ADR)
274#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1 \
275 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX1_CRB_AGT_ADR)
276#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5 \
277 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX5_CRB_AGT_ADR)
278#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6 \
279 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX6_CRB_AGT_ADR)
280#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8 \
281 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX8_CRB_AGT_ADR)
282#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS0 \
283 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS0_CRB_AGT_ADR)
284#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS1 \
285 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS1_CRB_AGT_ADR)
286#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS2 \
287 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS2_CRB_AGT_ADR)
288#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS3 \
289 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS3_CRB_AGT_ADR)
290
291#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI \
292 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNI_CRB_AGT_ADR)
293#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGND \
294 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGND_CRB_AGT_ADR)
295#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0 \
296 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN0_CRB_AGT_ADR)
297#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1 \
298 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN1_CRB_AGT_ADR)
299#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2 \
300 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN2_CRB_AGT_ADR)
301#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3 \
302 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN3_CRB_AGT_ADR)
303#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4 \
304 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN4_CRB_AGT_ADR)
305#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC \
306 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNC_CRB_AGT_ADR)
307#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR0 \
308 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR0_CRB_AGT_ADR)
309#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR1 \
310 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR1_CRB_AGT_ADR)
311#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR2 \
312 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR2_CRB_AGT_ADR)
313#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR3 \
314 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR3_CRB_AGT_ADR)
315
316#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI \
317 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSI_CRB_AGT_ADR)
318#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSD \
319 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSD_CRB_AGT_ADR)
320#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0 \
321 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS0_CRB_AGT_ADR)
322#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1 \
323 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS1_CRB_AGT_ADR)
324#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2 \
325 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS2_CRB_AGT_ADR)
326#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3 \
327 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS3_CRB_AGT_ADR)
328#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSC \
329 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSC_CRB_AGT_ADR)
330
331#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAM \
332 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_NCM_CRB_AGT_ADR)
333#define QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR \
334 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_TMR_CRB_AGT_ADR)
335#define QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA \
336 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_XDMA_CRB_AGT_ADR)
337#define QLCNIC_HW_CRB_HUB_AGT_ADR_SN \
338 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_SN_CRB_AGT_ADR)
339#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q \
340 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_I2Q_CRB_AGT_ADR)
341#define QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB \
342 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_ROMUSB_CRB_AGT_ADR)
343#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0 \
344 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM0_CRB_AGT_ADR)
345#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM1 \
346 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM1_CRB_AGT_ADR)
347#define QLCNIC_HW_CRB_HUB_AGT_ADR_LPC \
348 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_LPC_CRB_AGT_ADR)
349
350#define QLCNIC_SRE_MISC (QLCNIC_CRB_SRE + 0x0002c)
351
352#define QLCNIC_I2Q_CLR_PCI_HI (QLCNIC_CRB_I2Q + 0x00034)
353
354#define ROMUSB_GLB (QLCNIC_CRB_ROMUSB + 0x00000)
355#define ROMUSB_ROM (QLCNIC_CRB_ROMUSB + 0x10000)
356
357#define QLCNIC_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
358#define QLCNIC_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
359#define QLCNIC_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c)
360#define QLCNIC_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
361#define QLCNIC_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044)
362#define QLCNIC_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
363#define QLCNIC_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8)
364
365#define QLCNIC_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n)))
366
367#define QLCNIC_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
368#define QLCNIC_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
369#define QLCNIC_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
370#define QLCNIC_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
371#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
372#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
373
374/* Lock IDs for ROM lock */
375#define ROM_LOCK_DRIVER 0x0d417340
376
377/******************************************************************************
378*
379* Definitions specific to M25P flash
380*
381*******************************************************************************
382*/
383
384/* all are 1MB windows */
385
386#define QLCNIC_PCI_CRB_WINDOWSIZE 0x00100000
387#define QLCNIC_PCI_CRB_WINDOW(A) \
388 (QLCNIC_PCI_CRBSPACE + (A)*QLCNIC_PCI_CRB_WINDOWSIZE)
389
390#define QLCNIC_CRB_NIU QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_NIU)
391#define QLCNIC_CRB_SRE QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SRE)
392#define QLCNIC_CRB_ROMUSB \
393 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_ROMUSB)
394#define QLCNIC_CRB_I2Q QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2Q)
395#define QLCNIC_CRB_I2C0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2C0)
396#define QLCNIC_CRB_SMB QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SMB)
397#define QLCNIC_CRB_MAX QLCNIC_PCI_CRB_WINDOW(64)
398
399#define QLCNIC_CRB_PCIX_HOST QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH)
400#define QLCNIC_CRB_PCIX_HOST2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH2)
401#define QLCNIC_CRB_PEG_NET_0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN0)
402#define QLCNIC_CRB_PEG_NET_1 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN1)
403#define QLCNIC_CRB_PEG_NET_2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN2)
404#define QLCNIC_CRB_PEG_NET_3 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN3)
405#define QLCNIC_CRB_PEG_NET_4 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SQS2)
406#define QLCNIC_CRB_PEG_NET_D QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGND)
407#define QLCNIC_CRB_PEG_NET_I QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGNI)
408#define QLCNIC_CRB_DDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_MN)
409#define QLCNIC_CRB_QDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SN)
410
411#define QLCNIC_CRB_PCIX_MD QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PS)
412#define QLCNIC_CRB_PCIE QLCNIC_CRB_PCIX_MD
413
414#define ISR_INT_VECTOR (QLCNIC_PCIX_PS_REG(PCIX_INT_VECTOR))
415#define ISR_INT_MASK (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
416#define ISR_INT_MASK_SLOW (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
417#define ISR_INT_TARGET_STATUS (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS))
418#define ISR_INT_TARGET_MASK (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK))
419#define ISR_INT_TARGET_STATUS_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
420#define ISR_INT_TARGET_MASK_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
421#define ISR_INT_TARGET_STATUS_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
422#define ISR_INT_TARGET_MASK_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
423#define ISR_INT_TARGET_STATUS_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
424#define ISR_INT_TARGET_MASK_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
425#define ISR_INT_TARGET_STATUS_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
426#define ISR_INT_TARGET_MASK_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
427#define ISR_INT_TARGET_STATUS_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
428#define ISR_INT_TARGET_MASK_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
429#define ISR_INT_TARGET_STATUS_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
430#define ISR_INT_TARGET_MASK_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
431#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
432#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
433
434#define QLCNIC_PCI_MN_2M (0)
435#define QLCNIC_PCI_MS_2M (0x80000)
436#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
437#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
438#define QLCNIC_PCI_2MB_SIZE (0x00200000UL)
439#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
440#define QLCNIC_PCI_CAMQM_2M_END (0x04800800UL)
441
442#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
443
444#define QLCNIC_ADDR_DDR_NET (0x0000000000000000ULL)
445#define QLCNIC_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
446#define QLCNIC_ADDR_OCM0 (0x0000000200000000ULL)
447#define QLCNIC_ADDR_OCM0_MAX (0x00000002000fffffULL)
448#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL)
449#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL)
450#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL)
451#define QLCNIC_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL)
452
453/*
454 * Register offsets for MN
455 */
456#define QLCNIC_MIU_CONTROL (0x000)
457#define QLCNIC_MIU_MN_CONTROL (QLCNIC_CRB_DDR_NET+QLCNIC_MIU_CONTROL)
458
459/* 200ms delay in each loop */
460#define QLCNIC_NIU_PHY_WAITLEN 200000
461/* 10 seconds before we give up */
462#define QLCNIC_NIU_PHY_WAITMAX 50
463#define QLCNIC_NIU_MAX_GBE_PORTS 4
464#define QLCNIC_NIU_MAX_XG_PORTS 2
465
466#define QLCNIC_NIU_MODE (QLCNIC_CRB_NIU + 0x00000)
467#define QLCNIC_NIU_GB_PAUSE_CTL (QLCNIC_CRB_NIU + 0x0030c)
468#define QLCNIC_NIU_XG_PAUSE_CTL (QLCNIC_CRB_NIU + 0x00098)
469
470#define QLCNIC_NIU_GB_MAC_CONFIG_0(I) \
471 (QLCNIC_CRB_NIU + 0x30000 + (I)*0x10000)
472#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \
473 (QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000)
474
475
476#define TEST_AGT_CTRL (0x00)
477
478#define TA_CTL_START 1
479#define TA_CTL_ENABLE 2
480#define TA_CTL_WRITE 4
481#define TA_CTL_BUSY 8
482
483/*
484 * Register offsets for MN
485 */
486#define MIU_TEST_AGT_BASE (0x90)
487
488#define MIU_TEST_AGT_ADDR_LO (0x04)
489#define MIU_TEST_AGT_ADDR_HI (0x08)
490#define MIU_TEST_AGT_WRDATA_LO (0x10)
491#define MIU_TEST_AGT_WRDATA_HI (0x14)
492#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20)
493#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24)
494#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1)))
495#define MIU_TEST_AGT_RDDATA_LO (0x18)
496#define MIU_TEST_AGT_RDDATA_HI (0x1c)
497#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28)
498#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c)
499#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1)))
500
501#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
502#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
503
504/*
505 * Register offsets for MS
506 */
507#define SIU_TEST_AGT_BASE (0x60)
508
509#define SIU_TEST_AGT_ADDR_LO (0x04)
510#define SIU_TEST_AGT_ADDR_HI (0x18)
511#define SIU_TEST_AGT_WRDATA_LO (0x08)
512#define SIU_TEST_AGT_WRDATA_HI (0x0c)
513#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i)))
514#define SIU_TEST_AGT_RDDATA_LO (0x10)
515#define SIU_TEST_AGT_RDDATA_HI (0x14)
516#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i)))
517
518#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
519#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
520
521/* XG Link status */
522#define XG_LINK_UP 0x10
523#define XG_LINK_DOWN 0x20
524
525#define XG_LINK_UP_P3 0x01
526#define XG_LINK_DOWN_P3 0x02
527#define XG_LINK_STATE_P3_MASK 0xf
528#define XG_LINK_STATE_P3(pcifn, val) \
529 (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK)
530
531#define P3_LINK_SPEED_MHZ 100
532#define P3_LINK_SPEED_MASK 0xff
533#define P3_LINK_SPEED_REG(pcifn) \
534 (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
535#define P3_LINK_SPEED_VAL(pcifn, reg) \
536 (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK)
537
538#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000)
539#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg))
540#define QLCNIC_FW_VERSION_MAJOR (QLCNIC_CAM_RAM(0x150))
541#define QLCNIC_FW_VERSION_MINOR (QLCNIC_CAM_RAM(0x154))
542#define QLCNIC_FW_VERSION_SUB (QLCNIC_CAM_RAM(0x158))
543#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100))
544#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120))
545#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124))
546
547#define NIC_CRB_BASE (QLCNIC_CAM_RAM(0x200))
548#define NIC_CRB_BASE_2 (QLCNIC_CAM_RAM(0x700))
549#define QLCNIC_REG(X) (NIC_CRB_BASE+(X))
550#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X))
551
552#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18))
553#define QLCNIC_ARG1_CRB_OFFSET (QLCNIC_REG(0x1c))
554#define QLCNIC_ARG2_CRB_OFFSET (QLCNIC_REG(0x20))
555#define QLCNIC_ARG3_CRB_OFFSET (QLCNIC_REG(0x24))
556#define QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28))
557
558#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50))
559#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c))
560
561#define CRB_XG_STATE_P3 (QLCNIC_REG(0x98))
562#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
563#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec))
564
565#define CRB_MPORT_MODE (QLCNIC_REG(0xc4))
566#define CRB_DMA_SHIFT (QLCNIC_REG(0xcc))
567
568#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4))
569
570#define CRB_V2P_0 (QLCNIC_REG(0x290))
571#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
572#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
573
574#define CRB_SW_INT_MASK_0 (QLCNIC_REG(0x1d8))
575#define CRB_SW_INT_MASK_1 (QLCNIC_REG(0x1e0))
576#define CRB_SW_INT_MASK_2 (QLCNIC_REG(0x1e4))
577#define CRB_SW_INT_MASK_3 (QLCNIC_REG(0x1e8))
578
579#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
580#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
581
582/*
583 * capabilities register, can be used to selectively enable/disable features
584 * for backward compability
585 */
586#define CRB_NIC_CAPABILITIES_HOST QLCNIC_REG(0x1a8)
587#define CRB_NIC_CAPABILITIES_FW QLCNIC_REG(0x1dc)
588#define CRB_NIC_MSI_MODE_HOST QLCNIC_REG(0x270)
589#define CRB_NIC_MSI_MODE_FW QLCNIC_REG(0x274)
590
591#define INTR_SCHEME_PERPORT 0x1
592#define MSI_MODE_MULTIFUNC 0x1
593
594/* used for ethtool tests */
595#define CRB_SCRATCHPAD_TEST QLCNIC_REG(0x280)
596
597/*
598 * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address
599 * which can be read by the Phantom host to get producer/consumer indexes from
600 * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following
601 * registers will be used for the addresses of the ring's shared memory
602 * on the Phantom.
603 */
604
605#define qlcnic_get_temp_val(x) ((x) >> 16)
606#define qlcnic_get_temp_state(x) ((x) & 0xffff)
607#define qlcnic_encode_temp(val, state) (((val) << 16) | (state))
608
/*
 * Temperature control: sensor states carried in the low 16 bits of the
 * temperature word (see qlcnic_get_temp_state() above).
 */
enum {
	QLCNIC_TEMP_NORMAL = 0x1,	/* Normal operating range */
	QLCNIC_TEMP_WARN,	/* Sound alert, temperature getting high */
	QLCNIC_TEMP_PANIC	/* Fatal error, hardware has shut down. */
};
617
618/* Lock IDs for PHY lock */
619#define PHY_LOCK_DRIVER 0x44524956
620
621/* Used for PS PCI Memory access */
622#define PCIX_PS_OP_ADDR_LO (0x10000)
623/* via CRB (PS side only) */
624#define PCIX_PS_OP_ADDR_HI (0x10004)
625
626#define PCIX_INT_VECTOR (0x10100)
627#define PCIX_INT_MASK (0x10104)
628
629#define PCIX_OCM_WINDOW (0x10800)
630#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x20 * (func))
631
632#define PCIX_TARGET_STATUS (0x10118)
633#define PCIX_TARGET_STATUS_F1 (0x10160)
634#define PCIX_TARGET_STATUS_F2 (0x10164)
635#define PCIX_TARGET_STATUS_F3 (0x10168)
636#define PCIX_TARGET_STATUS_F4 (0x10360)
637#define PCIX_TARGET_STATUS_F5 (0x10364)
638#define PCIX_TARGET_STATUS_F6 (0x10368)
639#define PCIX_TARGET_STATUS_F7 (0x1036c)
640
641#define PCIX_TARGET_MASK (0x10128)
642#define PCIX_TARGET_MASK_F1 (0x10170)
643#define PCIX_TARGET_MASK_F2 (0x10174)
644#define PCIX_TARGET_MASK_F3 (0x10178)
645#define PCIX_TARGET_MASK_F4 (0x10370)
646#define PCIX_TARGET_MASK_F5 (0x10374)
647#define PCIX_TARGET_MASK_F6 (0x10378)
648#define PCIX_TARGET_MASK_F7 (0x1037c)
649
650#define PCIX_MSI_F(i) (0x13000+((i)*4))
651
652#define QLCNIC_PCIX_PH_REG(reg) (QLCNIC_CRB_PCIE + (reg))
653#define QLCNIC_PCIX_PS_REG(reg) (QLCNIC_CRB_PCIX_MD + (reg))
654#define QLCNIC_PCIE_REG(reg) (QLCNIC_CRB_PCIE + (reg))
655
656#define PCIE_SEM0_LOCK (0x1c000)
657#define PCIE_SEM0_UNLOCK (0x1c004)
658#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N))
659#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N))
660
661#define PCIE_SETUP_FUNCTION (0x12040)
662#define PCIE_SETUP_FUNCTION2 (0x12048)
663#define PCIE_MISCCFG_RC (0x1206c)
664#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
665#define PCIE_CHICKEN3 (0x120c8)
666
667#define ISR_INT_STATE_REG (QLCNIC_PCIX_PS_REG(PCIE_MISCCFG_RC))
668#define PCIE_MAX_MASTER_SPLIT (0x14048)
669
670#define QLCNIC_PORT_MODE_NONE 0
671#define QLCNIC_PORT_MODE_XG 1
672#define QLCNIC_PORT_MODE_GB 2
673#define QLCNIC_PORT_MODE_802_3_AP 3
674#define QLCNIC_PORT_MODE_AUTO_NEG 4
675#define QLCNIC_PORT_MODE_AUTO_NEG_1G 5
676#define QLCNIC_PORT_MODE_AUTO_NEG_XG 6
677#define QLCNIC_PORT_MODE_ADDR (QLCNIC_CAM_RAM(0x24))
678#define QLCNIC_WOL_PORT_MODE (QLCNIC_CAM_RAM(0x198))
679
680#define QLCNIC_WOL_CONFIG_NV (QLCNIC_CAM_RAM(0x184))
681#define QLCNIC_WOL_CONFIG (QLCNIC_CAM_RAM(0x188))
682
683#define QLCNIC_PEG_TUNE_MN_PRESENT 0x1
684#define QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c))
685
686#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14))
687#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0))
688#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8))
689#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac))
690#define QLCNIC_CRB_DEV_REF_COUNT (QLCNIC_CAM_RAM(0x138))
691#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
692
693#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
694#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
695#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
696#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x14c))
697
698 /* Device State */
699#define QLCNIC_DEV_COLD 1
700#define QLCNIC_DEV_INITALIZING 2
701#define QLCNIC_DEV_READY 3
702#define QLCNIC_DEV_NEED_RESET 4
703#define QLCNIC_DEV_NEED_QUISCENT 5
704#define QLCNIC_DEV_FAILED 6
705
#define QLCNIC_RCODE_DRIVER_INFO	0x20000000
#define QLCNIC_RCODE_DRIVER_CAN_RELOAD	0x40000000
#define QLCNIC_RCODE_FATAL_ERROR	0x80000000
#define QLCNIC_FWERROR_PEGNUM(code)	((code) & 0xff)
/* (code) fully parenthesized so expression arguments shift as a whole */
#define QLCNIC_FWERROR_CODE(code)	(((code) >> 8) & 0xfffff)
711
712#define FW_POLL_DELAY (2 * HZ)
713#define FW_FAIL_THRESH 3
714#define FW_POLL_THRESH 10
715
716#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
717#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
718
719/*
720 * PCI Interrupt Vector Values.
721 */
722#define PCIX_INT_VECTOR_BIT_F0 0x0080
723#define PCIX_INT_VECTOR_BIT_F1 0x0100
724#define PCIX_INT_VECTOR_BIT_F2 0x0200
725#define PCIX_INT_VECTOR_BIT_F3 0x0400
726#define PCIX_INT_VECTOR_BIT_F4 0x0800
727#define PCIX_INT_VECTOR_BIT_F5 0x1000
728#define PCIX_INT_VECTOR_BIT_F6 0x2000
729#define PCIX_INT_VECTOR_BIT_F7 0x4000
730
731struct qlcnic_legacy_intr_set {
732 u32 int_vec_bit;
733 u32 tgt_status_reg;
734 u32 tgt_mask_reg;
735 u32 pci_int_reg;
736};
737
738#define QLCNIC_LEGACY_INTR_CONFIG \
739{ \
740 { \
741 .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
742 .tgt_status_reg = ISR_INT_TARGET_STATUS, \
743 .tgt_mask_reg = ISR_INT_TARGET_MASK, \
744 .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
745 \
746 { \
747 .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
748 .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
749 .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
750 .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
751 \
752 { \
753 .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
754 .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
755 .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
756 .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
757 \
758 { \
759 .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
760 .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
761 .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
762 .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
763 \
764 { \
765 .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
766 .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
767 .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
768 .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
769 \
770 { \
771 .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
772 .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
773 .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
774 .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
775 \
776 { \
777 .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
778 .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
779 .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
780 .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
781 \
782 { \
783 .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
784 .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
785 .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
786 .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
787}
788
/* NIU REGS */

/*
 * Extract bit @bit from register image @var; both arguments are fully
 * parenthesized so expressions such as (a | b) or (n + 1) expand
 * correctly at the call site.
 */
#define _qlcnic_crb_get_bit(var, bit)	(((var) >> (bit)) & 0x1)
792
793/*
794 * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3)
795 *
796 * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable
797 * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream
798 * Bit 2 : enable_rx => 1:enable frame recv, 0:disable
799 * Bit 3 : rx_synced => R/O: recv enable synched to recv stream
800 * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable
801 * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore
802 * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal
803 * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op
804 * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op
805 * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op
806 * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op
807 * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op
808 */
809#define qlcnic_gb_rx_flowctl(config_word) \
810 ((config_word) |= 1 << 5)
811#define qlcnic_gb_get_rx_flowctl(config_word) \
812 _qlcnic_crb_get_bit((config_word), 5)
813#define qlcnic_gb_unset_rx_flowctl(config_word) \
814 ((config_word) &= ~(1 << 5))
815
816/*
817 * NIU GB Pause Ctl Register
818 */
819
820#define qlcnic_gb_set_gb0_mask(config_word) \
821 ((config_word) |= 1 << 0)
822#define qlcnic_gb_set_gb1_mask(config_word) \
823 ((config_word) |= 1 << 2)
824#define qlcnic_gb_set_gb2_mask(config_word) \
825 ((config_word) |= 1 << 4)
826#define qlcnic_gb_set_gb3_mask(config_word) \
827 ((config_word) |= 1 << 6)
828
829#define qlcnic_gb_get_gb0_mask(config_word) \
830 _qlcnic_crb_get_bit((config_word), 0)
831#define qlcnic_gb_get_gb1_mask(config_word) \
832 _qlcnic_crb_get_bit((config_word), 2)
833#define qlcnic_gb_get_gb2_mask(config_word) \
834 _qlcnic_crb_get_bit((config_word), 4)
835#define qlcnic_gb_get_gb3_mask(config_word) \
836 _qlcnic_crb_get_bit((config_word), 6)
837
838#define qlcnic_gb_unset_gb0_mask(config_word) \
839 ((config_word) &= ~(1 << 0))
840#define qlcnic_gb_unset_gb1_mask(config_word) \
841 ((config_word) &= ~(1 << 2))
842#define qlcnic_gb_unset_gb2_mask(config_word) \
843 ((config_word) &= ~(1 << 4))
844#define qlcnic_gb_unset_gb3_mask(config_word) \
845 ((config_word) &= ~(1 << 6))
846
847/*
848 * NIU XG Pause Ctl Register
849 *
850 * Bit 0 : xg0_mask => 1:disable tx pause frames
851 * Bit 1 : xg0_request => 1:request single pause frame
852 * Bit 2 : xg0_on_off => 1:request is pause on, 0:off
853 * Bit 3 : xg1_mask => 1:disable tx pause frames
854 * Bit 4 : xg1_request => 1:request single pause frame
855 * Bit 5 : xg1_on_off => 1:request is pause on, 0:off
856 */
857
858#define qlcnic_xg_set_xg0_mask(config_word) \
859 ((config_word) |= 1 << 0)
860#define qlcnic_xg_set_xg1_mask(config_word) \
861 ((config_word) |= 1 << 3)
862
863#define qlcnic_xg_get_xg0_mask(config_word) \
864 _qlcnic_crb_get_bit((config_word), 0)
865#define qlcnic_xg_get_xg1_mask(config_word) \
866 _qlcnic_crb_get_bit((config_word), 3)
867
868#define qlcnic_xg_unset_xg0_mask(config_word) \
869 ((config_word) &= ~(1 << 0))
870#define qlcnic_xg_unset_xg1_mask(config_word) \
871 ((config_word) &= ~(1 << 3))
872
873/*
874 * NIU XG Pause Ctl Register
875 *
876 * Bit 0 : xg0_mask => 1:disable tx pause frames
877 * Bit 1 : xg0_request => 1:request single pause frame
878 * Bit 2 : xg0_on_off => 1:request is pause on, 0:off
879 * Bit 3 : xg1_mask => 1:disable tx pause frames
880 * Bit 4 : xg1_request => 1:request single pause frame
881 * Bit 5 : xg1_on_off => 1:request is pause on, 0:off
882 */
883
884/*
885 * PHY-Specific MII control/status registers.
886 */
887#define QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG 4
888#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17
889
890/*
891 * PHY-Specific Status Register (reg 17).
892 *
893 * Bit 0 : jabber => 1:jabber detected, 0:not
894 * Bit 1 : polarity => 1:polarity reversed, 0:normal
895 * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled
896 * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled
897 * Bit 4 : energydetect => 1:sleep, 0:active
898 * Bit 5 : downshift => 1:downshift, 0:no downshift
899 * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover)
900 * Bits 7-9 : cablelen => not valid in 10Mb/s mode
901 * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m
902 * Bit 10 : link => 1:link up, 0:link down
903 * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet
904 * Bit 12 : pagercvd => 1:page received, 0:page not received
905 * Bit 13 : duplex => 1:full duplex, 0:half duplex
906 * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd
907 */
908
/*
 * Accessors for the PHY-specific status register (reg 17) image.
 * All macro arguments are fully parenthesized so callers may pass
 * arbitrary expressions; in particular (val) in qlcnic_set_phy_speed
 * must be masked as a whole before shifting into bits 14-15.
 */
#define qlcnic_get_phy_speed(config_word) (((config_word) >> 14) & 0x03)

#define qlcnic_set_phy_speed(config_word, val)	\
		((config_word) |= (((val) & 0x03) << 14))
#define qlcnic_set_phy_duplex(config_word)	\
		((config_word) |= 1 << 13)
#define qlcnic_clear_phy_duplex(config_word)	\
		((config_word) &= ~(1 << 13))

#define qlcnic_get_phy_link(config_word)	\
		_qlcnic_crb_get_bit((config_word), 10)
#define qlcnic_get_phy_duplex(config_word)	\
		_qlcnic_crb_get_bit((config_word), 13)
922
923#define QLCNIC_NIU_NON_PROMISC_MODE 0
924#define QLCNIC_NIU_PROMISC_MODE 1
925#define QLCNIC_NIU_ALLMULTI_MODE 2
926
/*
 * One sub-window of the legacy 128MB CRB address space and where it
 * lives inside the 2MB BAR mapping.
 */
struct crb_128M_2M_sub_block_map {
	unsigned valid;		/* non-zero: this sub-block is mapped */
	unsigned start_128M;	/* sub-block start in the 128MB space */
	unsigned end_128M;	/* sub-block end in the 128MB space */
	unsigned start_2M;	/* matching offset within the 2MB BAR */
};

/* 16 sub-windows per CRB block */
struct crb_128M_2M_block_map{
	struct crb_128M_2M_sub_block_map sub_block[16];
};
937#endif /* __QLCNIC_HDR_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
new file mode 100644
index 000000000000..99a4d1379d00
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -0,0 +1,1274 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include "qlcnic.h"
26
27#include <net/ip.h>
28
29#define MASK(n) ((1ULL<<(n))-1)
30#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
31
32#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
33
34#define CRB_BLK(off) ((off >> 20) & 0x3f)
35#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
36#define CRB_WINDOW_2M (0x130060)
37#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
38#define CRB_INDIRECT_2M (0x1e0000UL)
39
40
#ifndef readq
/*
 * Fallback 64-bit MMIO read for platforms without a native readq:
 * two 32-bit reads, low word first.  Not atomic with respect to the
 * device.
 */
static inline u64 readq(void __iomem *addr)
{
	return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
}
#endif
47
#ifndef writeq
/*
 * Fallback 64-bit MMIO write for platforms without a native writeq:
 * two 32-bit writes, low word first.  Not atomic with respect to the
 * device.
 */
static inline void writeq(u64 val, void __iomem *addr)
{
	writel(((u32) (val)), (addr));
	writel(((u32) (val >> 32)), (addr + 4));
}
#endif
55
/* true if low <= addr < high */
#define ADDR_IN_RANGE(addr, low, high) \
	(((addr) < (high)) && ((addr) >= (low)))

#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
	((adapter)->ahw.pci_base0 + (off))

/*
 * Translate a device offset in the first page group into a CPU virtual
 * address inside the BAR0 mapping; returns NULL when @off falls outside
 * [FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END).
 */
static void __iomem *pci_base_offset(struct qlcnic_adapter *adapter,
					unsigned long off)
{
	if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
		return PCI_OFFSET_FIRST_RANGE(adapter, off);

	return NULL;
}
70
/*
 * Sub-window map translating the legacy 128MB CRB address space into
 * offsets within the 2MB BAR.  Presumably indexed by CRB block number
 * (cf. CRB_BLK(off) above) — confirm against the window-mapping code
 * that consumes this table.
 */
static const struct crb_128M_2M_block_map
crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
    {{{0, 0, 0, 0} } },		/* 0: PCI */
    {{{1, 0x0100000, 0x0102000, 0x120000},	/* 1: PCIE */
	{1, 0x0110000, 0x0120000, 0x130000},
	{1, 0x0120000, 0x0122000, 0x124000},
	{1, 0x0130000, 0x0132000, 0x126000},
	{1, 0x0140000, 0x0142000, 0x128000},
	{1, 0x0150000, 0x0152000, 0x12a000},
	{1, 0x0160000, 0x0170000, 0x110000},
	{1, 0x0170000, 0x0172000, 0x12e000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x01e0000, 0x01e0800, 0x122000},
	{0, 0x0000000, 0x0000000, 0x000000} } },
    {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
    {{{0, 0, 0, 0} } },	    /* 3: */
    {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
    {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE   */
    {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU   */
    {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM    */
    {{{1, 0x0800000, 0x0802000, 0x170000},  /* 8: SQM0  */
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x08f0000, 0x08f2000, 0x172000} } },
    {{{1, 0x0900000, 0x0902000, 0x174000},	/* 9: SQM1*/
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x09f0000, 0x09f2000, 0x176000} } },
    {{{0, 0x0a00000, 0x0a02000, 0x178000},	/* 10: SQM2*/
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x0af0000, 0x0af2000, 0x17a000} } },
    {{{0, 0x0b00000, 0x0b02000, 0x17c000},	/* 11: SQM3*/
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
    {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
    {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
    {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
    {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
    {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
    {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
    {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
    {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
    {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
    {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
    {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
    {{{0, 0, 0, 0} } },	/* 23: */
    {{{0, 0, 0, 0} } },	/* 24: */
    {{{0, 0, 0, 0} } },	/* 25: */
    {{{0, 0, 0, 0} } },	/* 26: */
    {{{0, 0, 0, 0} } },	/* 27: */
    {{{0, 0, 0, 0} } },	/* 28: */
    {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
    {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
    {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
    {{{0} } },	/* 32: PCI */
    {{{1, 0x2100000, 0x2102000, 0x120000},	/* 33: PCIE */
	{1, 0x2110000, 0x2120000, 0x130000},
	{1, 0x2120000, 0x2122000, 0x124000},
	{1, 0x2130000, 0x2132000, 0x126000},
	{1, 0x2140000, 0x2142000, 0x128000},
	{1, 0x2150000, 0x2152000, 0x12a000},
	{1, 0x2160000, 0x2170000, 0x110000},
	{1, 0x2170000, 0x2172000, 0x12e000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000} } },
    {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
    {{{0} } },	/* 35: */
    {{{0} } },	/* 36: */
    {{{0} } },	/* 37: */
    {{{0} } },	/* 38: */
    {{{0} } },	/* 39: */
    {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
    {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
    {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
    {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
    {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
    {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
    {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
    {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
    {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
    {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
    {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
    {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
    {{{0} } },	/* 52: */
    {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
    {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
    {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
    {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
    {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
    {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
    {{{0} } },	/* 59: I2C0 */
    {{{0} } },	/* 60: I2C1 */
    {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
    {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
    {{{1, 0x3f00000, 0x3f01000, 0x168000} } }	/* 63: P2NR0 */
};
228
/*
 * top 12 bits of crb internal address (hub, agent)
 * Indexed by CRB_BLK(off) — see CRB_HI() above; a zero entry means the
 * block has no CRB agent.
 */
static const unsigned crb_hub_agt[64] = {
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
	QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
	QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
	QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
	QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
	QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
	QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
	QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
	QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
	0,
	0,
	0,
	0,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
	QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
	QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
	QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
	0,
};
298
/* PCI Windowing for DDR regions. */

/* max polls (1ms apart) before giving up on a hardware semaphore */
#define QLCNIC_PCIE_SEM_TIMEOUT	10000

/*
 * Acquire inter-function hardware semaphore @sem: reading the lock
 * register returns 1 when the semaphore was free and is now held by us.
 * Polls at 1ms intervals, returning -EIO after QLCNIC_PCIE_SEM_TIMEOUT
 * attempts.  On success, optionally records our port number in @id_reg
 * as the lock owner.
 *
 * NOTE(review): a read value that is non-zero but != 1 skips the break,
 * yet still terminates the loop via the while (!done) test after one
 * msleep and is then treated as success — confirm this is intended.
 */
int
qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
{
	int done = 0, timeout = 0;

	while (!done) {
		done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
		if (done == 1)
			break;
		if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT)
			return -EIO;
		msleep(1);
	}

	if (id_reg)
		QLCWR32(adapter, id_reg, adapter->portnum);

	return 0;
}
322
/* Release hardware semaphore @sem; a read of the unlock register frees it. */
void
qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
{
	QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
}
328
329static int
330qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
331 struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
332{
333 u32 i, producer, consumer;
334 struct qlcnic_cmd_buffer *pbuf;
335 struct cmd_desc_type0 *cmd_desc;
336 struct qlcnic_host_tx_ring *tx_ring;
337
338 i = 0;
339
340 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
341 return -EIO;
342
343 tx_ring = adapter->tx_ring;
344 __netif_tx_lock_bh(tx_ring->txq);
345
346 producer = tx_ring->producer;
347 consumer = tx_ring->sw_consumer;
348
349 if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
350 netif_tx_stop_queue(tx_ring->txq);
351 __netif_tx_unlock_bh(tx_ring->txq);
352 return -EBUSY;
353 }
354
355 do {
356 cmd_desc = &cmd_desc_arr[i];
357
358 pbuf = &tx_ring->cmd_buf_arr[producer];
359 pbuf->skb = NULL;
360 pbuf->frag_count = 0;
361
362 memcpy(&tx_ring->desc_head[producer],
363 &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
364
365 producer = get_next_index(producer, tx_ring->num_desc);
366 i++;
367
368 } while (i != nr_desc);
369
370 tx_ring->producer = producer;
371
372 qlcnic_update_cmd_producer(adapter, tx_ring);
373
374 __netif_tx_unlock_bh(tx_ring->txq);
375
376 return 0;
377}
378
/*
 * Build and post a MAC filter add/delete request for @addr to the
 * firmware (op is QLCNIC_MAC_ADD or QLCNIC_MAC_DEL, judging by the
 * callers below).  Returns the result of qlcnic_send_cmd_descs().
 */
static int
qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
		unsigned op)
{
	struct qlcnic_nic_req req;
	struct qlcnic_mac_req *mac_req;
	u64 word;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);

	/* request header encodes the event type and originating port */
	word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	mac_req = (struct qlcnic_mac_req *)&req.words[0];
	mac_req->op = op;
	memcpy(mac_req->mac_addr, addr, 6);	/* 6 == ETH_ALEN */

	return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
}
399
/*
 * Ensure @addr is programmed in the firmware filter table.  If the
 * address is already on @del_list (i.e. it was programmed before the
 * current rx-mode rebuild), just move it back to adapter->mac_list so
 * it survives the later purge; otherwise allocate a tracking entry and
 * send a MAC-add request to the firmware.  Returns 0 or a negative
 * errno.
 */
static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter,
		u8 *addr, struct list_head *del_list)
{
	struct list_head *head;
	struct qlcnic_mac_list_s *cur;

	/* look up if already exists */
	list_for_each(head, del_list) {
		cur = list_entry(head, struct qlcnic_mac_list_s, list);

		if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
			list_move_tail(head, &adapter->mac_list);
			return 0;
		}
	}

	/* GFP_ATOMIC: presumably reached from the set-multicast path,
	 * which may not sleep — confirm against callers */
	cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
	if (cur == NULL) {
		dev_err(&adapter->netdev->dev,
			"failed to add mac address filter\n");
		return -ENOMEM;
	}
	memcpy(cur->mac_addr, addr, ETH_ALEN);
	list_add_tail(&cur->list, &adapter->mac_list);

	return qlcnic_sre_macaddr_change(adapter,
				cur->mac_addr, QLCNIC_MAC_ADD);
}
428
/*
 * set-multicast handler: rebuild the firmware MAC filter table and
 * miss-mode to match the netdev flags and multicast list.
 *
 * The currently-programmed addresses are first spliced onto a local
 * del_list; every address still wanted is moved back to
 * adapter->mac_list by qlcnic_nic_add_mac(), and whatever remains on
 * del_list at the end is deleted from the firmware.
 */
void qlcnic_set_multi(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct dev_mc_list *mc_ptr;
	u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	u32 mode = VPORT_MISS_MODE_DROP;	/* default: drop unmatched */
	LIST_HEAD(del_list);
	struct list_head *head;
	struct qlcnic_mac_list_s *cur;

	list_splice_tail_init(&adapter->mac_list, &del_list);

	/* our unicast address and broadcast are always programmed */
	qlcnic_nic_add_mac(adapter, adapter->mac_addr, &del_list);
	qlcnic_nic_add_mac(adapter, bcast_addr, &del_list);

	if (netdev->flags & IFF_PROMISC) {
		mode = VPORT_MISS_MODE_ACCEPT_ALL;
		goto send_fw_cmd;
	}

	/* more multicast entries than the hardware can filter: accept all */
	if ((netdev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(netdev) > adapter->max_mc_count)) {
		mode = VPORT_MISS_MODE_ACCEPT_MULTI;
		goto send_fw_cmd;
	}

	if (!netdev_mc_empty(netdev)) {
		netdev_for_each_mc_addr(mc_ptr, netdev) {
			qlcnic_nic_add_mac(adapter, mc_ptr->dmi_addr,
							&del_list);
		}
	}

send_fw_cmd:
	qlcnic_nic_set_promisc(adapter, mode);
	/* purge filters that are no longer referenced */
	head = &del_list;
	while (!list_empty(head)) {
		cur = list_entry(head->next, struct qlcnic_mac_list_s, list);

		qlcnic_sre_macaddr_change(adapter,
				cur->mac_addr, QLCNIC_MAC_DEL);
		list_del(&cur->list);
		kfree(cur);
	}
}
474
475int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
476{
477 struct qlcnic_nic_req req;
478 u64 word;
479
480 memset(&req, 0, sizeof(struct qlcnic_nic_req));
481
482 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
483
484 word = QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE |
485 ((u64)adapter->portnum << 16);
486 req.req_hdr = cpu_to_le64(word);
487
488 req.words[0] = cpu_to_le64(mode);
489
490 return qlcnic_send_cmd_descs(adapter,
491 (struct cmd_desc_type0 *)&req, 1);
492}
493
494void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
495{
496 struct qlcnic_mac_list_s *cur;
497 struct list_head *head = &adapter->mac_list;
498
499 while (!list_empty(head)) {
500 cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
501 qlcnic_sre_macaddr_change(adapter,
502 cur->mac_addr, QLCNIC_MAC_DEL);
503 list_del(&cur->list);
504 kfree(cur);
505 }
506}
507
508#define QLCNIC_CONFIG_INTR_COALESCE 3
509
/*
 * Send the interrupt coalescing parameters set by ethtool to the card.
 * Returns 0 on success or the error from qlcnic_send_cmd_descs().
 */
int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
{
	struct qlcnic_nic_req req;
	u64 word[6];
	int rv, i;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));

	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	/* word[0] is reused: first as the request header, then it is
	 * overwritten below as part of the coalescing payload. */
	word[0] = QLCNIC_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word[0]);

	/* NOTE(review): assumes sizeof(adapter->coal) fills all six 64-bit
	 * words; any uninitialized tail of word[] would be sent to the
	 * card as stack garbage -- confirm against the struct definition. */
	memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
	for (i = 0; i < 6; i++)
		req.words[i] = cpu_to_le64(word[i]);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
			"Could not send interrupt coalescing parameters\n");

	return rv;
}
537
538int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
539{
540 struct qlcnic_nic_req req;
541 u64 word;
542 int rv;
543
544 if ((adapter->flags & QLCNIC_LRO_ENABLED) == enable)
545 return 0;
546
547 memset(&req, 0, sizeof(struct qlcnic_nic_req));
548
549 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
550
551 word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
552 req.req_hdr = cpu_to_le64(word);
553
554 req.words[0] = cpu_to_le64(enable);
555
556 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
557 if (rv != 0)
558 dev_err(&adapter->netdev->dev,
559 "Could not send configure hw lro request\n");
560
561 adapter->flags ^= QLCNIC_LRO_ENABLED;
562
563 return rv;
564}
565
566int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable)
567{
568 struct qlcnic_nic_req req;
569 u64 word;
570 int rv;
571
572 if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
573 return 0;
574
575 memset(&req, 0, sizeof(struct qlcnic_nic_req));
576
577 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
578
579 word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
580 ((u64)adapter->portnum << 16);
581 req.req_hdr = cpu_to_le64(word);
582
583 req.words[0] = cpu_to_le64(enable);
584
585 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
586 if (rv != 0)
587 dev_err(&adapter->netdev->dev,
588 "Could not send configure bridge mode request\n");
589
590 adapter->flags ^= QLCNIC_BRIDGE_ENABLED;
591
592 return rv;
593}
594
595
596#define RSS_HASHTYPE_IP_TCP 0x3
597
/* Enable/disable receive-side scaling with TCP/IP hashing for both
 * IPv4 and IPv6 and a fixed hash key. */
int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
{
	struct qlcnic_nic_req req;
	u64 word;
	int i, rv;

	/* Static 40-byte RSS hash key, packed as five 64-bit words. */
	const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
			0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
			0x255b0ec26d5a56daULL };


	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	/*
	 * RSS request:
	 * bits 3-0: hash_method
	 *	5-4: hash_type_ipv4
	 *	7-6: hash_type_ipv6
	 *	  8: enable
	 *        9: use indirection table
	 *    47-10: reserved
	 *    63-48: indirection table mask
	 */
	word =  ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
		((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
		((u64)(enable & 0x1) << 8) |
		((0x7ULL) << 48);
	req.words[0] = cpu_to_le64(word);
	for (i = 0; i < 5; i++)
		req.words[i+1] = cpu_to_le64(key[i]);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev, "could not configure RSS\n");

	return rv;
}
639
640int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd)
641{
642 struct qlcnic_nic_req req;
643 u64 word;
644 int rv;
645
646 memset(&req, 0, sizeof(struct qlcnic_nic_req));
647 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
648
649 word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
650 req.req_hdr = cpu_to_le64(word);
651
652 req.words[0] = cpu_to_le64(cmd);
653 req.words[1] = cpu_to_le64(ip);
654
655 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
656 if (rv != 0)
657 dev_err(&adapter->netdev->dev,
658 "could not notify %s IP 0x%x reuqest\n",
659 (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
660
661 return rv;
662}
663
664int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
665{
666 struct qlcnic_nic_req req;
667 u64 word;
668 int rv;
669
670 memset(&req, 0, sizeof(struct qlcnic_nic_req));
671 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
672
673 word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
674 req.req_hdr = cpu_to_le64(word);
675 req.words[0] = cpu_to_le64(enable | (enable << 8));
676
677 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
678 if (rv != 0)
679 dev_err(&adapter->netdev->dev,
680 "could not configure link notification\n");
681
682 return rv;
683}
684
685int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
686{
687 struct qlcnic_nic_req req;
688 u64 word;
689 int rv;
690
691 memset(&req, 0, sizeof(struct qlcnic_nic_req));
692 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
693
694 word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
695 ((u64)adapter->portnum << 16) |
696 ((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56) ;
697
698 req.req_hdr = cpu_to_le64(word);
699
700 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
701 if (rv != 0)
702 dev_err(&adapter->netdev->dev,
703 "could not cleanup lro flows\n");
704
705 return rv;
706}
707
708/*
709 * qlcnic_change_mtu - Change the Maximum Transfer Unit
710 * @returns 0 on success, negative on failure
711 */
712
713int qlcnic_change_mtu(struct net_device *netdev, int mtu)
714{
715 struct qlcnic_adapter *adapter = netdev_priv(netdev);
716 int rc = 0;
717
718 if (mtu > P3_MAX_MTU) {
719 dev_err(&adapter->netdev->dev, "mtu > %d bytes unsupported\n",
720 P3_MAX_MTU);
721 return -EINVAL;
722 }
723
724 rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
725
726 if (!rc)
727 netdev->mtu = mtu;
728
729 return rc;
730}
731
/* Read this PCI function's MAC address from the CRB MAC block.
 *
 * Two functions share each three-register group; the odd function's
 * address starts 16 bits into the pair.
 *
 * NOTE(review): le64_to_cpu() on a value assembled in host order looks
 * suspicious -- on a big-endian host it would byte-swap the computed
 * address.  Confirm against the consumers of *mac.
 */
int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac)
{
	u32 crbaddr, mac_hi, mac_lo;
	int pci_func = adapter->ahw.pci_func;

	/* 3 dwords per pair of functions; odd functions start one dword in. */
	crbaddr = CRB_MAC_BLOCK_START +
		(4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));

	mac_lo = QLCRD32(adapter, crbaddr);
	mac_hi = QLCRD32(adapter, crbaddr+4);

	if (pci_func & 1)
		*mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
	else
		*mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));

	return 0;
}
750
/*
 * Translate a 128M-map CRB offset into a usable address in the 2M BAR.
 *
 * Returns:
 *   < 0  @off is outside the valid CRB range
 *   0    direct mapping exists; *@addr is the final address
 *   1    window access needed; *@addr points into the indirect region
 *        and the caller must first select the CRB window
 *        (qlcnic_pci_set_crbwindow_2M) under crb_lock/crb_win_lock --
 *        see qlcnic_hw_write_wx_2M()/qlcnic_hw_read_wx_2M().
 */
static int
qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
		ulong off, void __iomem **addr)
{
	const struct crb_128M_2M_sub_block_map *m;

	if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
		return -EINVAL;

	off -= QLCNIC_PCI_CRBSPACE;

	/*
	 * Try direct map
	 */
	m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];

	if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
		*addr = adapter->ahw.pci_base0 + m->start_2M +
			(off - m->start_128M);
		return 0;
	}

	/*
	 * Not in direct map, use crb window
	 */
	*addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
	return 1;
}
788
/*
 * Program the CRB window register so that indirect accesses hit the
 * 64K region containing @off.
 *
 * In: 'off' is offset from CRB space in 128M pci map
 * Side effect: caches the selected window in ahw.crb_win.
 * Callers hold crb_lock/crb_win_lock (see qlcnic_hw_write_wx_2M).
 */
static void
qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
{
	u32 window;
	void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M;

	off -= QLCNIC_PCI_CRBSPACE;

	window = CRB_HI(off);

	/* Skip the hardware write if this window is already selected. */
	if (adapter->ahw.crb_win == window)
		return;

	writel(window, addr);
	if (readl(addr) != window) {
		/* NOTE(review): crb_win is still cached below even though
		 * the read-back shows the write did not take effect, so the
		 * cache may diverge from hardware -- confirm intended. */
		if (printk_ratelimit())
			dev_warn(&adapter->pdev->dev,
				"failed to set CRB window to %d off 0x%lx\n",
				window, off);
	}
	adapter->ahw.crb_win = window;
}
816
/*
 * Write @data to CRB offset @off (128M-map address).
 * Direct-mapped offsets are written straight through; all others go via
 * the shared CRB window, serialized by crb_lock + crb_win_lock.
 * Returns 0 on success, -EIO for an invalid offset.
 */
int
qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
{
	unsigned long flags;
	int rv;
	void __iomem *addr = NULL;

	rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);

	if (rv == 0) {
		/* direct mapping -- no locking needed */
		writel(data, addr);
		return 0;
	}

	if (rv > 0) {
		/* indirect access: select the window, then write */
		write_lock_irqsave(&adapter->ahw.crb_lock, flags);
		crb_win_lock(adapter);
		qlcnic_pci_set_crbwindow_2M(adapter, off);
		writel(data, addr);
		crb_win_unlock(adapter);
		write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
		return 0;
	}

	dev_err(&adapter->pdev->dev,
			"%s: invalid offset: 0x%016lx\n", __func__, off);
	dump_stack();
	return -EIO;
}
847
/*
 * Read the CRB register at offset @off (128M-map address), using the
 * shared CRB window for offsets that are not direct-mapped.
 *
 * NOTE(review): returns (u32)-1 for an invalid offset, which is
 * indistinguishable from a legitimate 0xffffffff register value.
 */
u32
qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
{
	unsigned long flags;
	int rv;
	u32 data;
	void __iomem *addr = NULL;

	rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);

	if (rv == 0)
		return readl(addr);

	if (rv > 0) {
		/* indirect access: select the window, then read */
		write_lock_irqsave(&adapter->ahw.crb_lock, flags);
		crb_win_lock(adapter);
		qlcnic_pci_set_crbwindow_2M(adapter, off);
		data = readl(addr);
		crb_win_unlock(adapter);
		write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
		return data;
	}

	dev_err(&adapter->pdev->dev,
			"%s: invalid offset: 0x%016lx\n", __func__, off);
	dump_stack();
	return -1;
}
877
878
879void __iomem *
880qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
881{
882 void __iomem *addr = NULL;
883
884 WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr));
885
886 return addr;
887}
888
889
/* Select the OCM window covering @addr and return (through *start) the
 * corresponding offset in the 2M BAR.  Called with mem_lock held (see
 * qlcnic_pci_mem_access_direct).  Returns 0, or -EIO for the
 * unsupported QM address range. */
static int
qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
		u64 addr, u32 *start)
{
	u32 window;
	struct pci_dev *pdev = adapter->pdev;

	/* QM (queue manager) addresses cannot be reached this way. */
	if ((addr & 0x00ff800) == 0xff800) {
		if (printk_ratelimit())
			dev_warn(&pdev->dev, "QM access not handled\n");
		return -EIO;
	}

	window = OCM_WIN_P3P(addr);

	writel(window, adapter->ahw.ocm_win_crb);
	/* read back to flush */
	readl(adapter->ahw.ocm_win_crb);

	adapter->ahw.ocm_win = window;
	*start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
	return 0;
}
913
/* Read or write one 64-bit word of on-chip memory (OCM) directly
 * through the PCI BAR.
 *
 * @op: 0 = read into *data, nonzero = write *data.
 * Returns 0 on success or a negative errno.  Serialized by mem_lock.
 */
static int
qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
		u64 *data, int op)
{
	void __iomem *addr, *mem_ptr = NULL;
	resource_size_t mem_base;
	int ret;
	u32 start;

	mutex_lock(&adapter->ahw.mem_lock);

	ret = qlcnic_pci_set_window_2M(adapter, off, &start);
	if (ret != 0)
		goto unlock;

	addr = pci_base_offset(adapter, start);
	if (addr)
		goto noremap;

	/* @start is outside the existing mapping: map the containing
	 * page on demand (unmapped again below). */
	mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);

	mem_ptr = ioremap(mem_base, PAGE_SIZE);
	if (mem_ptr == NULL) {
		ret = -EIO;
		goto unlock;
	}

	addr = mem_ptr + (start & (PAGE_SIZE - 1));

noremap:
	if (op == 0)	/* read */
		*data = readq(addr);
	else		/* write */
		writeq(*data, addr);

unlock:
	mutex_unlock(&adapter->ahw.mem_lock);

	if (mem_ptr)
		iounmap(mem_ptr);
	return ret;
}
956
957#define MAX_CTL_CHECK 1000
958
/*
 * Write the 64-bit @data to adapter memory at @off through the MIU/SIU
 * test agent.  On parts with a 16-byte access stride the other half of
 * the 128-bit word is preserved via a read-modify-write.
 * Returns 0 on success, -EIO on bad alignment/range or agent timeout.
 */
int
qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
		u64 off, u64 data)
{
	int i, j, ret;
	u32 temp, off8;
	u64 stride;
	void __iomem *mem_crb;

	/* Only 64-bit aligned access */
	if (off & 7)
		return -EIO;

	/* P3 onward, test agent base for MIU and SIU is same */
	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
				QLCNIC_ADDR_QDR_NET_MAX_P3)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	/* On-chip memory goes directly through the BAR instead. */
	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
		return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);

	return -EIO;

correct:
	stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;

	off8 = off & ~(stride-1);

	mutex_lock(&adapter->ahw.mem_lock);

	writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
	writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));

	i = 0;
	if (stride == 16) {
		/* 128-bit granularity: read the full word first so the
		 * half we are not writing can be put back unchanged. */
		writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
		writel((TA_CTL_START | TA_CTL_ENABLE),
				(mem_crb + TEST_AGT_CTRL));

		for (j = 0; j < MAX_CTL_CHECK; j++) {
			temp = readl(mem_crb + TEST_AGT_CTRL);
			if ((temp & TA_CTL_BUSY) == 0)
				break;
		}

		if (j >= MAX_CTL_CHECK) {
			ret = -EIO;
			goto done;
		}

		/* Copy the untouched 64-bit half back into the write regs;
		 * then point i at the half being overwritten. */
		i = (off & 0xf) ? 0 : 2;
		writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
				mem_crb + MIU_TEST_AGT_WRDATA(i));
		writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
				mem_crb + MIU_TEST_AGT_WRDATA(i+1));
		i = (off & 0xf) ? 2 : 0;
	}

	writel(data & 0xffffffff,
			mem_crb + MIU_TEST_AGT_WRDATA(i));
	writel((data >> 32) & 0xffffffff,
			mem_crb + MIU_TEST_AGT_WRDATA(i+1));

	writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
	writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
			(mem_crb + TEST_AGT_CTRL));

	/* Wait for the agent to complete the write. */
	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = readl(mem_crb + TEST_AGT_CTRL);
		if ((temp & TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= MAX_CTL_CHECK) {
		if (printk_ratelimit())
			dev_err(&adapter->pdev->dev,
					"failed to write through agent\n");
		ret = -EIO;
	} else
		ret = 0;

done:
	mutex_unlock(&adapter->ahw.mem_lock);

	return ret;
}
1054
/*
 * Read a 64-bit word of adapter memory at @off through the MIU/SIU
 * test agent into *@data.
 * Returns 0 on success, -EIO on bad alignment/range or agent timeout.
 */
int
qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
		u64 off, u64 *data)
{
	int j, ret;
	u32 temp, off8;
	u64 val, stride;
	void __iomem *mem_crb;

	/* Only 64-bit aligned access */
	if (off & 7)
		return -EIO;

	/* P3 onward, test agent base for MIU and SIU is same */
	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
				QLCNIC_ADDR_QDR_NET_MAX_P3)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	/* On-chip memory goes directly through the BAR instead. */
	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
		return qlcnic_pci_mem_access_direct(adapter,
				off, data, 0);
	}

	return -EIO;

correct:
	stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;

	off8 = off & ~(stride-1);

	mutex_lock(&adapter->ahw.mem_lock);

	writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
	writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
	writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
	writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));

	/* Wait for the agent to complete the read. */
	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = readl(mem_crb + TEST_AGT_CTRL);
		if ((temp & TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= MAX_CTL_CHECK) {
		if (printk_ratelimit())
			dev_err(&adapter->pdev->dev,
					"failed to read through agent\n");
		ret = -EIO;
	} else {
		/* With a 16-byte stride, an offset into the upper half of
		 * the 128-bit word reads from the upper data registers. */
		off8 = MIU_TEST_AGT_RDDATA_LO;
		if ((stride == 16) && (off & 0xf))
			off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;

		temp = readl(mem_crb + off8 + 4);
		val = (u64)temp << 32;
		val |= readl(mem_crb + off8);
		*data = val;
		ret = 0;
	}

	mutex_unlock(&adapter->ahw.mem_lock);

	return ret;
}
1128
/* Read and validate the board configuration from flash and derive the
 * port type (10G vs 1G) for this function.
 * Returns 0 on success, -EIO on ROM read failure or bad magic. */
int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
{
	int offset, board_type, magic;
	struct pci_dev *pdev = adapter->pdev;

	offset = QLCNIC_FW_MAGIC_OFFSET;
	if (qlcnic_rom_fast_read(adapter, offset, &magic))
		return -EIO;

	if (magic != QLCNIC_BDINFO_MAGIC) {
		dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
			magic);
		return -EIO;
	}

	offset = QLCNIC_BRDTYPE_OFFSET;
	if (qlcnic_rom_fast_read(adapter, offset, &board_type))
		return -EIO;

	adapter->ahw.board_type = board_type;

	/* 4G-MM boards with GPIO bit 15 clear are treated as 10G-TP.
	 * NOTE(review): only the local board_type is overridden here;
	 * ahw.board_type (stored above) keeps the flash value -- confirm
	 * that is intentional. */
	if (board_type == QLCNIC_BRDTYPE_P3_4_GB_MM) {
		u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
		if ((gpio & 0x8000) == 0)
			board_type = QLCNIC_BRDTYPE_P3_10G_TP;
	}

	switch (board_type) {
	case QLCNIC_BRDTYPE_P3_HMEZ:
	case QLCNIC_BRDTYPE_P3_XG_LOM:
	case QLCNIC_BRDTYPE_P3_10G_CX4:
	case QLCNIC_BRDTYPE_P3_10G_CX4_LP:
	case QLCNIC_BRDTYPE_P3_IMEZ:
	case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS:
	case QLCNIC_BRDTYPE_P3_10G_SFP_CT:
	case QLCNIC_BRDTYPE_P3_10G_SFP_QT:
	case QLCNIC_BRDTYPE_P3_10G_XFP:
	case QLCNIC_BRDTYPE_P3_10000_BASE_T:
		adapter->ahw.port_type = QLCNIC_XGBE;
		break;
	case QLCNIC_BRDTYPE_P3_REF_QG:
	case QLCNIC_BRDTYPE_P3_4_GB:
	case QLCNIC_BRDTYPE_P3_4_GB_MM:
		adapter->ahw.port_type = QLCNIC_GBE;
		break;
	case QLCNIC_BRDTYPE_P3_10G_TP:
		/* Mixed board: first two functions are 10G, rest are 1G. */
		adapter->ahw.port_type = (adapter->portnum < 2) ?
			QLCNIC_XGBE : QLCNIC_GBE;
		break;
	default:
		dev_err(&pdev->dev, "unknown board type %x\n", board_type);
		adapter->ahw.port_type = QLCNIC_XGBE;
		break;
	}

	return 0;
}
1186
1187int
1188qlcnic_wol_supported(struct qlcnic_adapter *adapter)
1189{
1190 u32 wol_cfg;
1191
1192 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
1193 if (wol_cfg & (1UL << adapter->portnum)) {
1194 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
1195 if (wol_cfg & (1 << adapter->portnum))
1196 return 1;
1197 }
1198
1199 return 0;
1200}
1201
1202int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
1203{
1204 struct qlcnic_nic_req req;
1205 int rv;
1206 u64 word;
1207
1208 memset(&req, 0, sizeof(struct qlcnic_nic_req));
1209 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
1210
1211 word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
1212 req.req_hdr = cpu_to_le64(word);
1213
1214 req.words[0] = cpu_to_le64((u64)rate << 32);
1215 req.words[1] = cpu_to_le64(state);
1216
1217 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
1218 if (rv)
1219 dev_err(&adapter->pdev->dev, "LED configuration failed.\n");
1220
1221 return rv;
1222}
1223
1224static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u32 flag)
1225{
1226 struct qlcnic_nic_req req;
1227 int rv;
1228 u64 word;
1229
1230 memset(&req, 0, sizeof(struct qlcnic_nic_req));
1231 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
1232
1233 word = QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
1234 ((u64)adapter->portnum << 16);
1235 req.req_hdr = cpu_to_le64(word);
1236 req.words[0] = cpu_to_le64(flag);
1237
1238 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
1239 if (rv)
1240 dev_err(&adapter->pdev->dev,
1241 "%sting loopback mode failed.\n",
1242 flag ? "Set" : "Reset");
1243 return rv;
1244}
1245
1246int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter)
1247{
1248 if (qlcnic_set_fw_loopback(adapter, 1))
1249 return -EIO;
1250
1251 if (qlcnic_nic_set_promisc(adapter,
1252 VPORT_MISS_MODE_ACCEPT_ALL)) {
1253 qlcnic_set_fw_loopback(adapter, 0);
1254 return -EIO;
1255 }
1256
1257 msleep(1000);
1258 return 0;
1259}
1260
1261void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter)
1262{
1263 int mode = VPORT_MISS_MODE_DROP;
1264 struct net_device *netdev = adapter->netdev;
1265
1266 qlcnic_set_fw_loopback(adapter, 0);
1267
1268 if (netdev->flags & IFF_PROMISC)
1269 mode = VPORT_MISS_MODE_ACCEPT_ALL;
1270 else if (netdev->flags & IFF_ALLMULTI)
1271 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
1272
1273 qlcnic_nic_set_promisc(adapter, mode);
1274}
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
new file mode 100644
index 000000000000..ea00ab4d4feb
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -0,0 +1,1541 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/netdevice.h>
26#include <linux/delay.h>
27#include "qlcnic.h"
28
/* One (CRB address, value) pair from the flash crb_init area, replayed
 * during qlcnic_pinit_from_rom(). */
struct crb_addr_pair {
	u32 addr;
	u32 data;
};
33
34#define QLCNIC_MAX_CRB_XFORM 60
35static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM];
36
37#define crb_addr_transform(name) \
38 (crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \
39 QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20)
40
41#define QLCNIC_ADDR_ERROR (0xffffffff)
42
43static void
44qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
45 struct qlcnic_host_rds_ring *rds_ring);
46
/* Populate crb_addr_xform[] with the hub-agent address prefix for each
 * CRB region; consumed by qlcnic_decode_crb_addr(). */
static void crb_addr_transform_setup(void)
{
	crb_addr_transform(XDMA);
	crb_addr_transform(TIMR);
	crb_addr_transform(SRE);
	crb_addr_transform(SQN3);
	crb_addr_transform(SQN2);
	crb_addr_transform(SQN1);
	crb_addr_transform(SQN0);
	crb_addr_transform(SQS3);
	crb_addr_transform(SQS2);
	crb_addr_transform(SQS1);
	crb_addr_transform(SQS0);
	crb_addr_transform(RPMX7);
	crb_addr_transform(RPMX6);
	crb_addr_transform(RPMX5);
	crb_addr_transform(RPMX4);
	crb_addr_transform(RPMX3);
	crb_addr_transform(RPMX2);
	crb_addr_transform(RPMX1);
	crb_addr_transform(RPMX0);
	crb_addr_transform(ROMUSB);
	crb_addr_transform(SN);
	crb_addr_transform(QMN);
	crb_addr_transform(QMS);
	crb_addr_transform(PGNI);
	crb_addr_transform(PGND);
	crb_addr_transform(PGN3);
	crb_addr_transform(PGN2);
	crb_addr_transform(PGN1);
	crb_addr_transform(PGN0);
	crb_addr_transform(PGSI);
	crb_addr_transform(PGSD);
	crb_addr_transform(PGS3);
	crb_addr_transform(PGS2);
	crb_addr_transform(PGS1);
	crb_addr_transform(PGS0);
	crb_addr_transform(PS);
	crb_addr_transform(PH);
	crb_addr_transform(NIU);
	crb_addr_transform(I2Q);
	crb_addr_transform(EG);
	crb_addr_transform(MN);
	crb_addr_transform(MS);
	crb_addr_transform(CAS2);
	crb_addr_transform(CAS1);
	crb_addr_transform(CAS0);
	crb_addr_transform(CAM);
	crb_addr_transform(C2C1);
	crb_addr_transform(C2C0);
	crb_addr_transform(SMB);
	crb_addr_transform(OCM0);
	crb_addr_transform(I2C0);
}
101
102void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
103{
104 struct qlcnic_recv_context *recv_ctx;
105 struct qlcnic_host_rds_ring *rds_ring;
106 struct qlcnic_rx_buffer *rx_buf;
107 int i, ring;
108
109 recv_ctx = &adapter->recv_ctx;
110 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
111 rds_ring = &recv_ctx->rds_rings[ring];
112 for (i = 0; i < rds_ring->num_desc; ++i) {
113 rx_buf = &(rds_ring->rx_buf_arr[i]);
114 if (rx_buf->state == QLCNIC_BUFFER_FREE)
115 continue;
116 pci_unmap_single(adapter->pdev,
117 rx_buf->dma,
118 rds_ring->dma_size,
119 PCI_DMA_FROMDEVICE);
120 if (rx_buf->skb != NULL)
121 dev_kfree_skb_any(rx_buf->skb);
122 }
123 }
124}
125
126void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
127{
128 struct qlcnic_cmd_buffer *cmd_buf;
129 struct qlcnic_skb_frag *buffrag;
130 int i, j;
131 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
132
133 cmd_buf = tx_ring->cmd_buf_arr;
134 for (i = 0; i < tx_ring->num_desc; i++) {
135 buffrag = cmd_buf->frag_array;
136 if (buffrag->dma) {
137 pci_unmap_single(adapter->pdev, buffrag->dma,
138 buffrag->length, PCI_DMA_TODEVICE);
139 buffrag->dma = 0ULL;
140 }
141 for (j = 0; j < cmd_buf->frag_count; j++) {
142 buffrag++;
143 if (buffrag->dma) {
144 pci_unmap_page(adapter->pdev, buffrag->dma,
145 buffrag->length,
146 PCI_DMA_TODEVICE);
147 buffrag->dma = 0ULL;
148 }
149 }
150 if (cmd_buf->skb) {
151 dev_kfree_skb_any(cmd_buf->skb);
152 cmd_buf->skb = NULL;
153 }
154 cmd_buf++;
155 }
156}
157
158void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
159{
160 struct qlcnic_recv_context *recv_ctx;
161 struct qlcnic_host_rds_ring *rds_ring;
162 struct qlcnic_host_tx_ring *tx_ring;
163 int ring;
164
165 recv_ctx = &adapter->recv_ctx;
166
167 if (recv_ctx->rds_rings == NULL)
168 goto skip_rds;
169
170 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
171 rds_ring = &recv_ctx->rds_rings[ring];
172 vfree(rds_ring->rx_buf_arr);
173 rds_ring->rx_buf_arr = NULL;
174 }
175 kfree(recv_ctx->rds_rings);
176
177skip_rds:
178 if (adapter->tx_ring == NULL)
179 return;
180
181 tx_ring = adapter->tx_ring;
182 vfree(tx_ring->cmd_buf_arr);
183 kfree(adapter->tx_ring);
184}
185
186int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
187{
188 struct qlcnic_recv_context *recv_ctx;
189 struct qlcnic_host_rds_ring *rds_ring;
190 struct qlcnic_host_sds_ring *sds_ring;
191 struct qlcnic_host_tx_ring *tx_ring;
192 struct qlcnic_rx_buffer *rx_buf;
193 int ring, i, size;
194
195 struct qlcnic_cmd_buffer *cmd_buf_arr;
196 struct net_device *netdev = adapter->netdev;
197
198 size = sizeof(struct qlcnic_host_tx_ring);
199 tx_ring = kzalloc(size, GFP_KERNEL);
200 if (tx_ring == NULL) {
201 dev_err(&netdev->dev, "failed to allocate tx ring struct\n");
202 return -ENOMEM;
203 }
204 adapter->tx_ring = tx_ring;
205
206 tx_ring->num_desc = adapter->num_txd;
207 tx_ring->txq = netdev_get_tx_queue(netdev, 0);
208
209 cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
210 if (cmd_buf_arr == NULL) {
211 dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
212 return -ENOMEM;
213 }
214 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
215 tx_ring->cmd_buf_arr = cmd_buf_arr;
216
217 recv_ctx = &adapter->recv_ctx;
218
219 size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring);
220 rds_ring = kzalloc(size, GFP_KERNEL);
221 if (rds_ring == NULL) {
222 dev_err(&netdev->dev, "failed to allocate rds ring struct\n");
223 return -ENOMEM;
224 }
225 recv_ctx->rds_rings = rds_ring;
226
227 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
228 rds_ring = &recv_ctx->rds_rings[ring];
229 switch (ring) {
230 case RCV_RING_NORMAL:
231 rds_ring->num_desc = adapter->num_rxd;
232 if (adapter->ahw.cut_through) {
233 rds_ring->dma_size =
234 QLCNIC_CT_DEFAULT_RX_BUF_LEN;
235 rds_ring->skb_size =
236 QLCNIC_CT_DEFAULT_RX_BUF_LEN;
237 } else {
238 rds_ring->dma_size =
239 QLCNIC_P3_RX_BUF_MAX_LEN;
240 rds_ring->skb_size =
241 rds_ring->dma_size + NET_IP_ALIGN;
242 }
243 break;
244
245 case RCV_RING_JUMBO:
246 rds_ring->num_desc = adapter->num_jumbo_rxd;
247 rds_ring->dma_size =
248 QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN;
249
250 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
251 rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA;
252
253 rds_ring->skb_size =
254 rds_ring->dma_size + NET_IP_ALIGN;
255 break;
256
257 case RCV_RING_LRO:
258 rds_ring->num_desc = adapter->num_lro_rxd;
259 rds_ring->dma_size = QLCNIC_RX_LRO_BUFFER_LENGTH;
260 rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
261 break;
262
263 }
264 rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *)
265 vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
266 if (rds_ring->rx_buf_arr == NULL) {
267 dev_err(&netdev->dev, "Failed to allocate "
268 "rx buffer ring %d\n", ring);
269 goto err_out;
270 }
271 memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
272 INIT_LIST_HEAD(&rds_ring->free_list);
273 /*
274 * Now go through all of them, set reference handles
275 * and put them in the queues.
276 */
277 rx_buf = rds_ring->rx_buf_arr;
278 for (i = 0; i < rds_ring->num_desc; i++) {
279 list_add_tail(&rx_buf->list,
280 &rds_ring->free_list);
281 rx_buf->ref_handle = i;
282 rx_buf->state = QLCNIC_BUFFER_FREE;
283 rx_buf++;
284 }
285 spin_lock_init(&rds_ring->lock);
286 }
287
288 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
289 sds_ring = &recv_ctx->sds_rings[ring];
290 sds_ring->irq = adapter->msix_entries[ring].vector;
291 sds_ring->adapter = adapter;
292 sds_ring->num_desc = adapter->num_rxd;
293
294 for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
295 INIT_LIST_HEAD(&sds_ring->free_list[i]);
296 }
297
298 return 0;
299
300err_out:
301 qlcnic_free_sw_resources(adapter);
302 return -ENOMEM;
303}
304
305/*
306 * Utility to translate from internal Phantom CRB address
307 * to external PCI CRB address.
308 */
309static u32 qlcnic_decode_crb_addr(u32 addr)
310{
311 int i;
312 u32 base_addr, offset, pci_base;
313
314 crb_addr_transform_setup();
315
316 pci_base = QLCNIC_ADDR_ERROR;
317 base_addr = addr & 0xfff00000;
318 offset = addr & 0x000fffff;
319
320 for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) {
321 if (crb_addr_xform[i] == base_addr) {
322 pci_base = i << 20;
323 break;
324 }
325 }
326 if (pci_base == QLCNIC_ADDR_ERROR)
327 return pci_base;
328 else
329 return pci_base + offset;
330}
331
332#define QLCNIC_MAX_ROM_WAIT_USEC 100
333
334static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
335{
336 long timeout = 0;
337 long done = 0;
338
339 cond_resched();
340
341 while (done == 0) {
342 done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
343 done &= 2;
344 if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
345 dev_err(&adapter->pdev->dev,
346 "Timeout reached waiting for rom done");
347 return -EIO;
348 }
349 udelay(1);
350 }
351 return 0;
352}
353
/* Issue a single 4-byte flash ROM read through the ROMUSB interface.
 * Callers hold the rom lock (see qlcnic_rom_fast_read).
 * Returns 0 with the value in *valp, or -EIO on ROM timeout. */
static int do_rom_fast_read(struct qlcnic_adapter *adapter,
				int addr, int *valp)
{
	QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
	QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
	QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
	QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb);
	if (qlcnic_wait_rom_done(adapter)) {
		dev_err(&adapter->pdev->dev, "Error waiting for rom done\n");
		return -EIO;
	}
	/* reset abyte_cnt and dummy_byte_cnt */
	QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0);
	udelay(10);
	QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);

	*valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA);
	return 0;
}
373
374static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
375 u8 *bytes, size_t size)
376{
377 int addridx;
378 int ret = 0;
379
380 for (addridx = addr; addridx < (addr + size); addridx += 4) {
381 int v;
382 ret = do_rom_fast_read(adapter, addridx, &v);
383 if (ret != 0)
384 break;
385 *(__le32 *)bytes = cpu_to_le32(v);
386 bytes += 4;
387 }
388
389 return ret;
390}
391
392int
393qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
394 u8 *bytes, size_t size)
395{
396 int ret;
397
398 ret = qlcnic_rom_lock(adapter);
399 if (ret < 0)
400 return ret;
401
402 ret = do_rom_fast_read_words(adapter, addr, bytes, size);
403
404 qlcnic_rom_unlock(adapter);
405 return ret;
406}
407
408int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp)
409{
410 int ret;
411
412 if (qlcnic_rom_lock(adapter) != 0)
413 return -EIO;
414
415 ret = do_rom_fast_read(adapter, addr, valp);
416 qlcnic_rom_unlock(adapter);
417 return ret;
418}
419
/*
 * Replay the CRB initialization table stored in flash: soft-reset the
 * chip, read the (data, addr) pair table from ROM, write each pair to
 * its CRB register (skipping a set of registers that must not be
 * touched), then perform the fixed post-init peg register writes.
 * Returns 0 on success, -EIO on flash/read errors, -ENOMEM on
 * allocation failure.
 */
int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
{
	int addr, val;
	int i, n, init_delay;
	struct crb_addr_pair *buf;
	unsigned offset;
	u32 off;
	struct pci_dev *pdev = adapter->pdev;

	/* resetall */
	qlcnic_rom_lock(adapter);
	QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xffffffff);
	qlcnic_rom_unlock(adapter);

	/* Flash word 0 must hold the 0xcafecafe signature; word 1 packs
	 * the table length (high 16 bits) and offset (low 16 bits). */
	if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
			qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
		dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n);
		return -EIO;
	}
	offset = n & 0xffffU;
	n = (n >> 16) & 0xffffU;

	if (n >= 1024) {
		dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n");
		return -EIO;
	}

	buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
	if (buf == NULL) {
		dev_err(&pdev->dev, "Unable to calloc memory for rom read.\n");
		return -ENOMEM;
	}

	/* Each table entry is 8 bytes: data word followed by address word. */
	for (i = 0; i < n; i++) {
		if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
		qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
			kfree(buf);
			return -EIO;
		}

		buf[i].addr = addr;
		buf[i].data = val;
	}

	for (i = 0; i < n; i++) {

		off = qlcnic_decode_crb_addr(buf[i].addr);
		if (off == QLCNIC_ADDR_ERROR) {
			dev_err(&pdev->dev, "CRB init value out of range %x\n",
					buf[i].addr);
			continue;
		}
		off += QLCNIC_PCI_CRBSPACE;

		/* skip odd (misaligned) register targets */
		if (off & 1)
			continue;

		/* skipping cold reboot MAGIC */
		if (off == QLCNIC_CAM_RAM(0x1fc))
			continue;
		if (off == (QLCNIC_CRB_I2C0 + 0x1c))
			continue;
		if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */
			continue;
		if (off == (ROMUSB_GLB + 0xa8))
			continue;
		if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
			continue;
		if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
			continue;
		if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
			continue;
		if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET)
			continue;
		/* skip the function enable register */
		if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION))
			continue;
		if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2))
			continue;
		if ((off & 0x0ff00000) == QLCNIC_CRB_SMB)
			continue;

		init_delay = 1;
		/* After writing this register, HW needs time for CRB */
		/* to quiet down (else crb_window returns 0xffffffff) */
		if (off == QLCNIC_ROMUSB_GLB_SW_RESET)
			init_delay = 1000;

		QLCWR32(adapter, off, buf[i].data);

		msleep(init_delay);
	}
	kfree(buf);

	/* p2dn replyCount */
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
	/* disable_peg_cache 0 & 1*/
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);

	/* peg_clr_all */
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0);
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0);
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
	return 0;
}
531
532static int
533qlcnic_has_mn(struct qlcnic_adapter *adapter)
534{
535 u32 capability, flashed_ver;
536 capability = 0;
537
538 qlcnic_rom_fast_read(adapter,
539 QLCNIC_FW_VERSION_OFFSET, (int *)&flashed_ver);
540 flashed_ver = QLCNIC_DECODE_VERSION(flashed_ver);
541
542 if (flashed_ver >= QLCNIC_VERSION_CODE(4, 0, 220)) {
543
544 capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
545 if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
546 return 1;
547 }
548 return 0;
549}
550
/*
 * Scan the unified-image directory at the start of @unirom for the
 * table descriptor whose type matches @section.  Returns NULL if the
 * section is not present.
 *
 * NOTE(review): fields read out of the firmware image are converted
 * with cpu_to_le32(); on a big-endian host le32_to_cpu() appears to be
 * what is intended (the two are identical no-ops on little-endian) —
 * verify before relying on this code on BE hardware.
 */
static
struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section)
{
	u32 i;
	struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
	__le32 entries = cpu_to_le32(directory->num_entries);

	for (i = 0; i < entries; i++) {

		/* directory entries start at findex, entry_size apart */
		__le32 offs = cpu_to_le32(directory->findex) +
				(i * cpu_to_le32(directory->entry_size));
		__le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));

		if (tab_type == section)
			return (struct uni_table_desc *) &unirom[offs];
	}

	return NULL;
}
570
/*
 * Locate the product-table entry in the unified firmware image that
 * matches this adapter's chip revision and memory configuration, and
 * cache its offset in adapter->file_prd_off.
 *
 * An entry matches when its chip revision equals ours and its flags
 * word has the MN bit (bit 1) set for MN boards, or the non-MN bit
 * (bit 2) otherwise.  If no MN entry is found on an MN board, the
 * search is retried once for a non-MN entry (the "nomn" pass).
 * Returns 0 on success, -1 if no suitable entry exists.
 */
static int
qlcnic_set_product_offs(struct qlcnic_adapter *adapter)
{
	struct uni_table_desc *ptab_descr;
	const u8 *unirom = adapter->fw->data;
	u32 i;
	__le32 entries;
	int mn_present = qlcnic_has_mn(adapter);

	ptab_descr = qlcnic_get_table_desc(unirom,
				QLCNIC_UNI_DIR_SECT_PRODUCT_TBL);
	if (ptab_descr == NULL)
		return -1;

	entries = cpu_to_le32(ptab_descr->num_entries);
nomn:
	for (i = 0; i < entries; i++) {

		__le32 flags, file_chiprev, offs;
		u8 chiprev = adapter->ahw.revision_id;
		u32 flagbit;

		offs = cpu_to_le32(ptab_descr->findex) +
				(i * cpu_to_le32(ptab_descr->entry_size));
		flags = cpu_to_le32(*((int *)&unirom[offs] +
						QLCNIC_UNI_FLAGS_OFF));
		file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
						QLCNIC_UNI_CHIP_REV_OFF));

		/* bit 1 = image for MN boards, bit 2 = image for non-MN */
		flagbit = mn_present ? 1 : 2;

		if ((chiprev == file_chiprev) &&
					((1ULL << flagbit) & flags)) {
			adapter->file_prd_off = offs;
			return 0;
		}
	}
	if (mn_present) {
		/* no MN-specific entry; fall back to a non-MN one */
		mn_present = 0;
		goto nomn;
	}
	return -1;
}
614
/*
 * Look up the data descriptor for @section in the unified firmware
 * image.  @idx_offset selects which index word of the cached product
 * entry (adapter->file_prd_off, set by qlcnic_set_product_offs) names
 * the entry to use within that section's table.  Returns NULL if the
 * section table is absent.
 */
static
struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter,
			u32 section, u32 idx_offset)
{
	const u8 *unirom = adapter->fw->data;
	int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
								idx_offset));
	struct uni_table_desc *tab_desc;
	__le32 offs;

	tab_desc = qlcnic_get_table_desc(unirom, section);

	if (tab_desc == NULL)
		return NULL;

	offs = cpu_to_le32(tab_desc->findex) +
			(cpu_to_le32(tab_desc->entry_size) * idx);

	return (struct uni_data_desc *)&unirom[offs];
}
635
636static u8 *
637qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter)
638{
639 u32 offs = QLCNIC_BOOTLD_START;
640
641 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
642 offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
643 QLCNIC_UNI_DIR_SECT_BOOTLD,
644 QLCNIC_UNI_BOOTLD_IDX_OFF))->findex);
645
646 return (u8 *)&adapter->fw->data[offs];
647}
648
649static u8 *
650qlcnic_get_fw_offs(struct qlcnic_adapter *adapter)
651{
652 u32 offs = QLCNIC_IMAGE_START;
653
654 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
655 offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
656 QLCNIC_UNI_DIR_SECT_FW,
657 QLCNIC_UNI_FIRMWARE_IDX_OFF))->findex);
658
659 return (u8 *)&adapter->fw->data[offs];
660}
661
662static __le32
663qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
664{
665 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
666 return cpu_to_le32((qlcnic_get_data_desc(adapter,
667 QLCNIC_UNI_DIR_SECT_FW,
668 QLCNIC_UNI_FIRMWARE_IDX_OFF))->size);
669 else
670 return cpu_to_le32(
671 *(u32 *)&adapter->fw->data[QLCNIC_FW_SIZE_OFFSET]);
672}
673
/*
 * Extract the firmware version from the loaded image.  Plain flash
 * images keep it at a fixed offset; unified images append an ASCII
 * "REV=maj.min.sub" string near the end of the firmware section,
 * which is parsed here.  Returns the packed version (maj | min<<8 |
 * sub<<16) or 0 if the version string cannot be parsed.
 *
 * NOTE(review): sscanf "%u" writes plain unsigned ints through
 * __le32-typed variables; same size so it works, but sparse-unclean —
 * verify if endian-annotating this file.
 */
static __le32
qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
{
	struct uni_data_desc *fw_data_desc;
	const struct firmware *fw = adapter->fw;
	__le32 major, minor, sub;
	const u8 *ver_str;
	int i, ret;

	if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
		return cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]);

	fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
			QLCNIC_UNI_FIRMWARE_IDX_OFF);
	/* version string lives in the last 17 bytes of the fw section */
	ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
		cpu_to_le32(fw_data_desc->size) - 17;

	for (i = 0; i < 12; i++) {
		if (!strncmp(&ver_str[i], "REV=", 4)) {
			ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
					&major, &minor, &sub);
			if (ret != 3)
				return 0;
			else
				return major + (minor << 8) + (sub << 16);
		}
	}

	return 0;
}
704
/*
 * Extract the BIOS version from the loaded image.  Plain flash images
 * keep it at a fixed offset; unified images store it in the cached
 * product entry.  For unified images the bytes are reshuffled into
 * the same packed layout the flash path produces.
 */
static __le32
qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
{
	const struct firmware *fw = adapter->fw;
	__le32 bios_ver, prd_off = adapter->file_prd_off;

	if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
		return cpu_to_le32(
			*(u32 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]);

	bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
				+ QLCNIC_UNI_BIOS_VERSION_OFF));

	/* byte-swap into major/minor/sub packed order */
	return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
}
720
/*
 * Decide whether firmware must be (re)loaded.  Returns 1 when a reset
 * is required (explicit request, previous init failure, firmware
 * heartbeat dead, or a newer file image is available), 0 when the
 * running firmware can be kept, or -EINTR if the heartbeat wait was
 * interrupted by a signal.
 */
int
qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
{
	u32 count, old_count;
	u32 val, version, major, minor, build;
	int i, timeout;

	if (adapter->need_fw_reset)
		return 1;

	/* last attempt had failed */
	if (QLCRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
		return 1;

	/* heartbeat: the alive counter advances while firmware runs */
	old_count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);

	for (i = 0; i < 10; i++) {

		timeout = msleep_interruptible(200);
		if (timeout) {
			/* interrupted: mark init failed so the next
			 * check forces a reset */
			QLCWR32(adapter, CRB_CMDPEG_STATE,
					PHAN_INITIALIZE_FAILED);
			return -EINTR;
		}

		count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
		if (count != old_count)
			break;
	}

	/* firmware is dead */
	if (count == old_count)
		return 1;

	/* check if we have got newer or different file firmware */
	if (adapter->fw) {

		val = qlcnic_get_fw_version(adapter);

		version = QLCNIC_DECODE_VERSION(val);

		major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
		minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
		build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);

		if (version > QLCNIC_VERSION_CODE(major, minor, build))
			return 1;
	}

	return 0;
}
772
/* Firmware image names, indexed by adapter->fw_type.
 * Assumes QLCNIC_UNIFIED_ROMIMAGE == 0 and QLCNIC_FLASH_ROMIMAGE == 1
 * — TODO(review): confirm against the enum in qlcnic.h. */
static const char *fw_name[] = {
	QLCNIC_UNIFIED_ROMIMAGE_NAME,
	QLCNIC_FLASH_ROMIMAGE_NAME,
};
777
/*
 * Copy bootloader and firmware into adapter memory, 8 bytes at a
 * time, then kick the bootstrap.  Source is either the previously
 * requested file image (adapter->fw != NULL) or, failing that, the
 * on-board flash read through the ROM interface.  Returns 0 on
 * success, -EIO on any memory-write or flash-read error.
 */
int
qlcnic_load_firmware(struct qlcnic_adapter *adapter)
{
	u64 *ptr64;
	u32 i, flashaddr, size;
	const struct firmware *fw = adapter->fw;
	struct pci_dev *pdev = adapter->pdev;

	dev_info(&pdev->dev, "loading firmware from %s\n",
			fw_name[adapter->fw_type]);

	if (fw) {
		__le64 data;

		/* bootloader occupies [BOOTLD_START, IMAGE_START) */
		size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;

		ptr64 = (u64 *)qlcnic_get_bootld_offs(adapter);
		flashaddr = QLCNIC_BOOTLD_START;

		for (i = 0; i < size; i++) {
			data = cpu_to_le64(ptr64[i]);

			if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data))
				return -EIO;

			flashaddr += 8;
		}

		size = (__force u32)qlcnic_get_fw_size(adapter) / 8;

		ptr64 = (u64 *)qlcnic_get_fw_offs(adapter);
		flashaddr = QLCNIC_IMAGE_START;

		for (i = 0; i < size; i++) {
			data = cpu_to_le64(ptr64[i]);

			if (qlcnic_pci_mem_write_2M(adapter,
						flashaddr, data))
				return -EIO;

			flashaddr += 8;
		}
	} else {
		u64 data;
		u32 hi, lo;

		size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
		flashaddr = QLCNIC_BOOTLD_START;

		/* flash reads are 32-bit; assemble each 64-bit word */
		for (i = 0; i < size; i++) {
			if (qlcnic_rom_fast_read(adapter,
					flashaddr, (int *)&lo) != 0)
				return -EIO;
			if (qlcnic_rom_fast_read(adapter,
					flashaddr + 4, (int *)&hi) != 0)
				return -EIO;

			data = (((u64)hi << 32) | lo);

			if (qlcnic_pci_mem_write_2M(adapter,
						flashaddr, data))
				return -EIO;

			flashaddr += 8;
		}
	}
	msleep(1);

	/* release the pegs and start the bootstrap */
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020);
	QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e);
	return 0;
}
850
/*
 * Sanity-check a freshly requested firmware file before loading it:
 * magic/size checks, minimum and maximum version bounds, BIOS
 * compatibility with the flash, and a newer-image-in-flash check.
 * Returns 0 when the file may be loaded, -EINVAL/-EIO otherwise.
 * On success, writes the BDINFO magic into CAM RAM.
 */
static int
qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
{
	__le32 val;
	u32 ver, min_ver, bios, min_size;
	struct pci_dev *pdev = adapter->pdev;
	const struct firmware *fw = adapter->fw;
	u8 fw_type = adapter->fw_type;

	if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) {
		if (qlcnic_set_product_offs(adapter))
			return -EINVAL;

		min_size = QLCNIC_UNI_FW_MIN_SIZE;
	} else {
		val = cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
		if ((__force u32)val != QLCNIC_BDINFO_MAGIC)
			return -EINVAL;

		min_size = QLCNIC_FW_MIN_SIZE;
	}

	if (fw->size < min_size)
		return -EINVAL;

	val = qlcnic_get_fw_version(adapter);

	/* anything below 4.0.216 is not supported by this driver */
	min_ver = QLCNIC_VERSION_CODE(4, 0, 216);

	ver = QLCNIC_DECODE_VERSION(val);

	if ((_major(ver) > _QLCNIC_LINUX_MAJOR) || (ver < min_ver)) {
		dev_err(&pdev->dev,
				"%s: firmware version %d.%d.%d unsupported\n",
		fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
		return -EINVAL;
	}

	val = qlcnic_get_bios_version(adapter);
	qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios);
	if ((__force u32)val != bios) {
		dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
				fw_name[fw_type]);
		return -EINVAL;
	}

	/* check if flashed firmware is newer */
	if (qlcnic_rom_fast_read(adapter,
			QLCNIC_FW_VERSION_OFFSET, (int *)&val))
		return -EIO;

	val = QLCNIC_DECODE_VERSION(val);
	if (val > ver) {
		dev_info(&pdev->dev, "%s: firmware is older than flash\n",
				fw_name[fw_type]);
		return -EINVAL;
	}

	QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
	return 0;
}
912
913static void
914qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter)
915{
916 u8 fw_type;
917
918 switch (adapter->fw_type) {
919 case QLCNIC_UNKNOWN_ROMIMAGE:
920 fw_type = QLCNIC_UNIFIED_ROMIMAGE;
921 break;
922
923 case QLCNIC_UNIFIED_ROMIMAGE:
924 default:
925 fw_type = QLCNIC_FLASH_ROMIMAGE;
926 break;
927 }
928
929 adapter->fw_type = fw_type;
930}
931
932
933
/*
 * Select a firmware source for the adapter.  Tries each fw_type in
 * order (see qlcnic_get_next_fwtype); a file image must both be
 * present on disk and pass validation, otherwise the next type is
 * tried.  Terminates at QLCNIC_FLASH_ROMIMAGE (adapter->fw == NULL),
 * which means "boot from on-board flash".
 */
void qlcnic_request_firmware(struct qlcnic_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int rc;

	adapter->fw_type = QLCNIC_UNKNOWN_ROMIMAGE;

next:
	qlcnic_get_next_fwtype(adapter);

	if (adapter->fw_type == QLCNIC_FLASH_ROMIMAGE) {
		adapter->fw = NULL;
	} else {
		rc = request_firmware(&adapter->fw,
				fw_name[adapter->fw_type], &pdev->dev);
		if (rc != 0)
			goto next;

		rc = qlcnic_validate_firmware(adapter);
		if (rc != 0) {
			release_firmware(adapter->fw);
			msleep(1);
			goto next;
		}
	}
}
960
961
962void
963qlcnic_release_firmware(struct qlcnic_adapter *adapter)
964{
965 if (adapter->fw)
966 release_firmware(adapter->fw);
967 adapter->fw = NULL;
968}
969
/*
 * Wait up to 30 seconds (60 * 500ms) for the command peg to report
 * initialization complete.  Returns 0 on success; on timeout the
 * state is forced to FAILED and -EIO is returned.
 */
int qlcnic_phantom_init(struct qlcnic_adapter *adapter)
{
	u32 val;
	int retries = 60;

	do {
		val = QLCRD32(adapter, CRB_CMDPEG_STATE);

		switch (val) {
		case PHAN_INITIALIZE_COMPLETE:
		case PHAN_INITIALIZE_ACK:
			return 0;
		case PHAN_INITIALIZE_FAILED:
			goto out_err;
		default:
			break;
		}

		msleep(500);

	} while (--retries);

	/* timed out: record the failure so later checks force a reset */
	QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);

out_err:
	dev_err(&adapter->pdev->dev, "firmware init failed\n");
	return -EIO;
}
998
999static int
1000qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
1001{
1002 u32 val;
1003 int retries = 2000;
1004
1005 do {
1006 val = QLCRD32(adapter, CRB_RCVPEG_STATE);
1007
1008 if (val == PHAN_PEG_RCV_INITIALIZED)
1009 return 0;
1010
1011 msleep(10);
1012
1013 } while (--retries);
1014
1015 if (!retries) {
1016 dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
1017 "complete, state: 0x%x.\n", val);
1018 return -EIO;
1019 }
1020
1021 return 0;
1022}
1023
/*
 * Final firmware hand-shake: wait for the receive peg, then publish
 * the host's capabilities (per-port interrupts, multifunction MSI,
 * multiport mode) and acknowledge command-peg initialization.
 * Returns 0 on success or the receive-peg error.
 */
int qlcnic_init_firmware(struct qlcnic_adapter *adapter)
{
	int err;

	err = qlcnic_receive_peg_ready(adapter);
	if (err)
		return err;

	QLCWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
	QLCWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
	QLCWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
	QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);

	return err;
}
1039
/*
 * Decode a firmware link-event message and update the adapter's link
 * state (speed, duplex, autoneg, module type), warning about
 * unsupported twinax cables.  Field layout is packed into msg body
 * words 1 and 2 as unpacked below.
 */
static void
qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
				struct qlcnic_fw_msg *msg)
{
	u32 cable_OUI;
	u16 cable_len;
	u16 link_speed;
	u8 link_status, module, duplex, autoneg;
	struct net_device *netdev = adapter->netdev;

	adapter->has_link_events = 1;

	cable_OUI = msg->body[1] & 0xffffffff;
	cable_len = (msg->body[1] >> 32) & 0xffff;
	link_speed = (msg->body[1] >> 48) & 0xffff;

	link_status = msg->body[2] & 0xff;
	duplex = (msg->body[2] >> 16) & 0xff;
	autoneg = (msg->body[2] >> 24) & 0xff;

	module = (msg->body[2] >> 8) & 0xff;
	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
		dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, "
				"length %d\n", cable_OUI, cable_len);
	else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
		dev_info(&netdev->dev, "unsupported cable length %d\n",
				cable_len);

	qlcnic_advert_link_change(adapter, link_status);

	if (duplex == LINKEVENT_FULL_DUPLEX)
		adapter->link_duplex = DUPLEX_FULL;
	else
		adapter->link_duplex = DUPLEX_HALF;

	adapter->module_type = module;
	adapter->link_autoneg = autoneg;
	adapter->link_speed = link_speed;
}
1079
/*
 * Collect a multi-descriptor firmware message from the status ring
 * (up to 4 descriptors / 8 words) starting at @index and dispatch it
 * by opcode.  Currently only link events are handled.
 */
static void
qlcnic_handle_fw_message(int desc_cnt, int index,
		struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_fw_msg msg;
	struct status_desc *desc;
	int i = 0, opcode;

	while (desc_cnt > 0 && i < 8) {
		desc = &sds_ring->desc_head[index];
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);

		index = get_next_index(index, sds_ring->num_desc);
		desc_cnt--;
	}

	opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
	switch (opcode) {
	case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
		qlcnic_handle_linkevent(sds_ring->adapter, &msg);
		break;
	default:
		break;
	}
}
1106
/*
 * Allocate and DMA-map a receive skb for @buffer.  On success the
 * buffer is marked BUSY and holds the skb and its DMA address.
 * Returns 0 on success, -ENOMEM on allocation or mapping failure
 * (buffer->skb left NULL).
 */
static int
qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
		struct qlcnic_host_rds_ring *rds_ring,
		struct qlcnic_rx_buffer *buffer)
{
	struct sk_buff *skb;
	dma_addr_t dma;
	struct pci_dev *pdev = adapter->pdev;

	buffer->skb = dev_alloc_skb(rds_ring->skb_size);
	if (!buffer->skb)
		return -ENOMEM;

	skb = buffer->skb;

	/* 2-byte reserve aligns the IP header when HW doesn't
	 * cut-through */
	if (!adapter->ahw.cut_through)
		skb_reserve(skb, 2);

	dma = pci_map_single(pdev, skb->data,
			rds_ring->dma_size, PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(pdev, dma)) {
		dev_kfree_skb_any(skb);
		buffer->skb = NULL;
		return -ENOMEM;
	}

	buffer->skb = skb;
	buffer->dma = dma;
	buffer->state = QLCNIC_BUFFER_BUSY;

	return 0;
}
1140
/*
 * Detach the skb at @index from the rds ring: unmap its DMA buffer,
 * set the checksum status from the HW @cksum result, and mark the
 * buffer FREE for refill.  Returns the skb (or NULL if the buffer
 * held none).
 */
static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
		struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum)
{
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &rds_ring->rx_buf_arr[index];

	pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
			PCI_DMA_FROMDEVICE);

	skb = buffer->skb;
	if (!skb)
		goto no_skb;

	if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	skb->dev = adapter->netdev;

	buffer->skb = NULL;
no_skb:
	buffer->state = QLCNIC_BUFFER_FREE;
	return skb;
}
1170
/*
 * Handle one ordinary receive completion described by @sts_data0:
 * validate the ring/handle, pull the skb off the rds ring, trim it to
 * the reported length and hand it to GRO.  Returns the rx buffer so
 * the caller can queue it for refill, or NULL on a bogus descriptor.
 */
static struct qlcnic_rx_buffer *
qlcnic_process_rcv(struct qlcnic_adapter *adapter,
		struct qlcnic_host_sds_ring *sds_ring,
		int ring, u64 sts_data0)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	length = qlcnic_get_sts_totallength(sts_data0);
	cksum  = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	/* clamp to the buffer size HW was given */
	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}
1220
1221#define QLC_TCP_HDR_SIZE 20
1222#define QLC_TCP_TS_OPTION_SIZE 12
1223#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
1224
1225static struct qlcnic_rx_buffer *
1226qlcnic_process_lro(struct qlcnic_adapter *adapter,
1227 struct qlcnic_host_sds_ring *sds_ring,
1228 int ring, u64 sts_data0, u64 sts_data1)
1229{
1230 struct net_device *netdev = adapter->netdev;
1231 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1232 struct qlcnic_rx_buffer *buffer;
1233 struct sk_buff *skb;
1234 struct qlcnic_host_rds_ring *rds_ring;
1235 struct iphdr *iph;
1236 struct tcphdr *th;
1237 bool push, timestamp;
1238 int l2_hdr_offset, l4_hdr_offset;
1239 int index;
1240 u16 lro_length, length, data_offset;
1241 u32 seq_number;
1242
1243 if (unlikely(ring > adapter->max_rds_rings))
1244 return NULL;
1245
1246 rds_ring = &recv_ctx->rds_rings[ring];
1247
1248 index = qlcnic_get_lro_sts_refhandle(sts_data0);
1249 if (unlikely(index > rds_ring->num_desc))
1250 return NULL;
1251
1252 buffer = &rds_ring->rx_buf_arr[index];
1253
1254 timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
1255 lro_length = qlcnic_get_lro_sts_length(sts_data0);
1256 l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
1257 l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
1258 push = qlcnic_get_lro_sts_push_flag(sts_data0);
1259 seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
1260
1261 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
1262 if (!skb)
1263 return buffer;
1264
1265 if (timestamp)
1266 data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
1267 else
1268 data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
1269
1270 skb_put(skb, lro_length + data_offset);
1271
1272 skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
1273
1274 skb_pull(skb, l2_hdr_offset);
1275 skb->protocol = eth_type_trans(skb, netdev);
1276
1277 iph = (struct iphdr *)skb->data;
1278 th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1279
1280 length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
1281 iph->tot_len = htons(length);
1282 iph->check = 0;
1283 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
1284 th->psh = push;
1285 th->seq = htonl(seq_number);
1286
1287 length = skb->len;
1288
1289 netif_receive_skb(skb);
1290
1291 adapter->stats.lro_pkts++;
1292 adapter->stats.rxbytes += length;
1293
1294 return buffer;
1295}
1296
/*
 * NAPI poll workhorse: consume up to @max status descriptors, route
 * each completion to the rx/LRO/fw-message handler, return consumed
 * descriptors to the firmware, replenish the rds free lists and ring
 * the consumer doorbell.  Returns the number of descriptors
 * processed.
 */
int
qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf;
	u64 sts_data0, sts_data1;

	int count = 0;
	int opcode, ring, desc_cnt;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

		/* descriptor still owned by firmware: nothing more */
		if (!(sts_data0 & STATUS_OWNER_HOST))
			break;

		desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
		opcode = qlcnic_get_sts_opcode(sts_data0);

		switch (opcode) {
		case QLCNIC_RXPKT_DESC:
		case QLCNIC_OLD_RXPKT_DESC:
		case QLCNIC_SYN_OFFLOAD:
			ring = qlcnic_get_sts_type(sts_data0);
			rxbuf = qlcnic_process_rcv(adapter, sds_ring,
					ring, sts_data0);
			break;
		case QLCNIC_LRO_DESC:
			ring = qlcnic_get_lro_sts_type(sts_data0);
			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
			rxbuf = qlcnic_process_lro(adapter, sds_ring,
					ring, sts_data0, sts_data1);
			break;
		case QLCNIC_RESPONSE_DESC:
			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
			/* fall through: recycle descriptors, no rxbuf */
		default:
			goto skip;
		}

		WARN_ON(desc_cnt > 1);

		if (rxbuf)
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);

skip:
		/* hand the consumed descriptors back to firmware */
		for (; desc_cnt > 0; desc_cnt--) {
			desc = &sds_ring->desc_head[consumer];
			desc->status_desc_data[0] =
				cpu_to_le64(STATUS_OWNER_PHANTOM);
			consumer = get_next_index(consumer, sds_ring->num_desc);
		}
		count++;
	}

	/* re-arm freed buffers with fresh skbs and give them back to
	 * the rds rings */
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		struct qlcnic_host_rds_ring *rds_ring =
			&adapter->recv_ctx.rds_rings[ring];

		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur,
						struct qlcnic_rx_buffer, list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
						&rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}

		qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
	}

	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}

	return count;
}
1381
/*
 * Post every buffer on the rds free list to the hardware: ensure each
 * has an skb (allocating as needed), write a receive descriptor for
 * it and advance the producer; finally ring the producer doorbell.
 * Blocks on the rds lock (contrast with the _nodb trylock variant).
 */
void
qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
	struct qlcnic_host_rds_ring *rds_ring)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int producer, count = 0;
	struct list_head *head;

	producer = rds_ring->producer;

	spin_lock(&rds_ring->lock);
	head = &rds_ring->free_list;
	while (!list_empty(head)) {

		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			/* out of memory: stop posting, retry later */
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);

		producer = get_next_index(producer, rds_ring->num_desc);
	}
	spin_unlock(&rds_ring->lock);

	if (count) {
		rds_ring->producer = producer;
		writel((producer-1) & (rds_ring->num_desc-1),
				rds_ring->crb_rcv_producer);
	}
}
1423
/*
 * Lock-avoiding variant of qlcnic_post_rx_buffers() used from the
 * NAPI poll path: if the rds lock is contended, skip this round
 * entirely rather than spin (a later poll will refill).
 */
static void
qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
		struct qlcnic_host_rds_ring *rds_ring)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int producer, count = 0;
	struct list_head *head;

	producer = rds_ring->producer;
	if (!spin_trylock(&rds_ring->lock))
		return;

	head = &rds_ring->free_list;
	while (!list_empty(head)) {

		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			/* out of memory: stop posting, retry later */
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);

		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
				rds_ring->crb_rcv_producer);
	}
	spin_unlock(&rds_ring->lock);
}
1466
/*
 * Diagnostic (loopback test) version of qlcnic_process_rcv(): pull
 * the completed skb, check its payload against the loopback pattern
 * (counting matches in adapter->diag_cnt) and discard it instead of
 * passing it to the stack.  Returns the rx buffer for refill, or
 * NULL on a bogus descriptor.
 */
static struct qlcnic_rx_buffer *
qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
		struct qlcnic_host_sds_ring *sds_ring,
		int ring, u64 sts_data0)
{
	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	length = qlcnic_get_sts_totallength(sts_data0);
	cksum  = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	skb_put(skb, rds_ring->skb_size);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	skb->truesize = skb->len + sizeof(struct sk_buff);

	if (!qlcnic_check_loopback_buff(skb->data))
		adapter->diag_cnt++;

	dev_kfree_skb_any(skb);

	return buffer;
}
1511
/*
 * Diagnostic poll: process at most one status descriptor through the
 * loopback handler, return it to the firmware and advance/ring the
 * consumer.  Used only by the offline self-test path.
 */
void
qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf;
	u64 sts_data0;

	int opcode, ring, desc_cnt;
	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

	/* descriptor still owned by firmware: nothing to do */
	if (!(sts_data0 & STATUS_OWNER_HOST))
		return;

	desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
	opcode = qlcnic_get_sts_opcode(sts_data0);

	ring = qlcnic_get_sts_type(sts_data0);
	rxbuf = qlcnic_process_rcv_diag(adapter, sds_ring,
			ring, sts_data0);

	desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
	consumer = get_next_index(consumer, sds_ring->num_desc);

	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
new file mode 100644
index 000000000000..665e8e56b6a8
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -0,0 +1,2720 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/vmalloc.h>
26#include <linux/interrupt.h>
27
28#include "qlcnic.h"
29
30#include <linux/dma-mapping.h>
31#include <linux/if_vlan.h>
32#include <net/ip.h>
33#include <linux/ipv6.h>
34#include <linux/inetdevice.h>
35#include <linux/sysfs.h>
36
37MODULE_DESCRIPTION("QLogic 10 GbE Converged Ethernet Driver");
38MODULE_LICENSE("GPL");
39MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
40MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
41
42char qlcnic_driver_name[] = "qlcnic";
43static const char qlcnic_driver_string[] = "QLogic Converged Ethernet Driver v"
44 QLCNIC_LINUX_VERSIONID;
45
46static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
47
48/* Default to restricted 1G auto-neg mode */
49static int wol_port_mode = 5;
50
51static int use_msi = 1;
52module_param(use_msi, int, 0644);
53MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled");
54
55static int use_msi_x = 1;
56module_param(use_msi_x, int, 0644);
57MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled");
58
59static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
60module_param(auto_fw_reset, int, 0644);
61MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
62
63static int __devinit qlcnic_probe(struct pci_dev *pdev,
64 const struct pci_device_id *ent);
65static void __devexit qlcnic_remove(struct pci_dev *pdev);
66static int qlcnic_open(struct net_device *netdev);
67static int qlcnic_close(struct net_device *netdev);
68static void qlcnic_tx_timeout(struct net_device *netdev);
69static void qlcnic_tx_timeout_task(struct work_struct *work);
70static void qlcnic_attach_work(struct work_struct *work);
71static void qlcnic_fwinit_work(struct work_struct *work);
72static void qlcnic_fw_poll_work(struct work_struct *work);
73static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
74 work_func_t func, int delay);
75static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
76static int qlcnic_poll(struct napi_struct *napi, int budget);
77#ifdef CONFIG_NET_POLL_CONTROLLER
78static void qlcnic_poll_controller(struct net_device *netdev);
79#endif
80
81static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
82static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
83static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
84static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
85
86static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
87static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
88
89static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
90static irqreturn_t qlcnic_intr(int irq, void *data);
91static irqreturn_t qlcnic_msi_intr(int irq, void *data);
92static irqreturn_t qlcnic_msix_intr(int irq, void *data);
93
94static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
95static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
96
/* PCI Device ID Table */
#define ENTRY(device) \
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}

#define PCI_DEVICE_ID_QLOGIC_QLE824X  0x8020

/* Devices claimed by this driver: QLogic QLE824X functions whose PCI
 * class is Ethernet controller (the class match filters out any
 * non-networking functions of the same device). */
static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
110
111
/*
 * qlcnic_update_cmd_producer - publish the Tx producer index to hardware.
 *
 * Writes the ring's producer index to the command-producer CRB register
 * and, when free descriptors drop to TX_STOP_THRESH or below, stops the
 * netdev Tx queue.  The smp_mb() orders the queue stop against the
 * availability re-check done on the Tx completion path.
 */
void
qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
		struct qlcnic_host_tx_ring *tx_ring)
{
	writel(tx_ring->producer, tx_ring->crb_cmd_producer);

	if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
		netif_stop_queue(adapter->netdev);
		smp_mb();
	}
}
123
/* Per-PCI-function MSI target status registers, indexed by pci_func. */
static const u32 msi_tgt_status[8] = {
	ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
	ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
	ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
	ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
};

/* Legacy (INTx) interrupt register layout, one entry per PCI function. */
static const
struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
133
/* Mask interrupts for one status ring by clearing its interrupt-mask CRB. */
static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
{
	writel(0, sds_ring->crb_intr_mask);
}
138
/*
 * Unmask interrupts for one status ring.  For legacy INTx mode (neither
 * MSI nor MSI-X enabled) the global target mask register must also be
 * re-armed with the 0xfbff pattern.
 */
static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;

	writel(0x1, sds_ring->crb_intr_mask);

	if (!QLCNIC_IS_MSI_FAMILY(adapter))
		writel(0xfbff, adapter->tgt_mask_reg);
}
148
149static int
150qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
151{
152 int size = sizeof(struct qlcnic_host_sds_ring) * count;
153
154 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
155
156 return (recv_ctx->sds_rings == NULL);
157}
158
159static void
160qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
161{
162 if (recv_ctx->sds_rings != NULL)
163 kfree(recv_ctx->sds_rings);
164
165 recv_ctx->sds_rings = NULL;
166}
167
/*
 * Allocate the SDS ring array and register one NAPI context per ring.
 * Returns 0 on success, -ENOMEM if the ring array allocation fails.
 */
static int
qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
		return -ENOMEM;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_add(netdev, &sds_ring->napi,
				qlcnic_poll, QLCNIC_NETDEV_WEIGHT);
	}

	return 0;
}
186
187static void
188qlcnic_napi_del(struct qlcnic_adapter *adapter)
189{
190 int ring;
191 struct qlcnic_host_sds_ring *sds_ring;
192 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
193
194 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
195 sds_ring = &recv_ctx->sds_rings[ring];
196 netif_napi_del(&sds_ring->napi);
197 }
198
199 qlcnic_free_sds_rings(&adapter->recv_ctx);
200}
201
/* Enable NAPI polling, then unmask the interrupt, for each SDS ring. */
static void
qlcnic_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		/* NAPI must be enabled before the ring interrupt is
		 * unmasked so an immediate IRQ can schedule the poll. */
		napi_enable(&sds_ring->napi);
		qlcnic_enable_int(sds_ring);
	}
}
215
/*
 * For each SDS ring: mask its interrupt, wait for any in-flight poll to
 * finish, then disable NAPI.  Masking first prevents re-scheduling.
 */
static void
qlcnic_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		qlcnic_disable_int(sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}
}
230
231static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
232{
233 memset(&adapter->stats, 0, sizeof(adapter->stats));
234 return;
235}
236
237static int qlcnic_set_dma_mask(struct qlcnic_adapter *adapter)
238{
239 struct pci_dev *pdev = adapter->pdev;
240 u64 mask, cmask;
241
242 adapter->pci_using_dac = 0;
243
244 mask = DMA_BIT_MASK(39);
245 cmask = mask;
246
247 if (pci_set_dma_mask(pdev, mask) == 0 &&
248 pci_set_consistent_dma_mask(pdev, cmask) == 0) {
249 adapter->pci_using_dac = 1;
250 return 0;
251 }
252
253 return -EIO;
254}
255
/* Update addressable range if firmware supports it */
/*
 * CRB_DMA_SHIFT holds the number of address bits above 32 the device can
 * address; values above 32 are treated as invalid and left alone.  The
 * masks are only widened for shift > 9 (i.e. more than 41 bits --
 * presumably because the default 39-bit mask already covers smaller
 * windows; TODO confirm).  On failure the previous masks are restored.
 */
static int
qlcnic_update_dma_mask(struct qlcnic_adapter *adapter)
{
	int change, shift, err;
	u64 mask, old_mask, old_cmask;
	struct pci_dev *pdev = adapter->pdev;

	change = 0;

	shift = QLCRD32(adapter, CRB_DMA_SHIFT);
	if (shift > 32)
		return 0;

	if (shift > 9)
		change = 1;

	if (change) {
		/* Save the current masks so they can be rolled back;
		 * err_out is only reachable from inside this branch, so
		 * old_mask/old_cmask are always initialized there. */
		old_mask = pdev->dma_mask;
		old_cmask = pdev->dev.coherent_dma_mask;

		mask = DMA_BIT_MASK(32+shift);

		err = pci_set_dma_mask(pdev, mask);
		if (err)
			goto err_out;

		err = pci_set_consistent_dma_mask(pdev, mask);
		if (err)
			goto err_out;
		dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
	}

	return 0;

err_out:
	/* Roll back to the masks that were in effect on entry. */
	pci_set_dma_mask(pdev, old_mask);
	pci_set_consistent_dma_mask(pdev, old_cmask);
	return err;
}
296
297static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
298{
299 u32 val, data;
300
301 val = adapter->ahw.board_type;
302 if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
303 (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
304 if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
305 data = QLCNIC_PORT_MODE_802_3_AP;
306 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
307 } else if (port_mode == QLCNIC_PORT_MODE_XG) {
308 data = QLCNIC_PORT_MODE_XG;
309 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
310 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
311 data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
312 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
313 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
314 data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
315 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
316 } else {
317 data = QLCNIC_PORT_MODE_AUTO_NEG;
318 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
319 }
320
321 if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
322 (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
323 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
324 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
325 wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
326 }
327 QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
328 }
329}
330
/*
 * Force the MSI-X enable bit in the device's MSI-X capability on or off.
 * Used to put the capability in a known state around pci_enable_msix().
 */
static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
{
	u32 control;
	int pos;

	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_dword(pdev, pos, &control);
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		else
			/* NOTE(review): disabling zeroes the whole dword,
			 * not just the enable bit -- looks intentional
			 * but verify against the hardware errata. */
			control = 0;
		pci_write_config_dword(pdev, pos, control);
	}
}
346
347static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
348{
349 int i;
350
351 for (i = 0; i < count; i++)
352 adapter->msix_entries[i].entry = i;
353}
354
/*
 * Read the MAC address from the firmware and install it as the netdev's
 * dev_addr/perm_addr (also cached in adapter->mac_addr).  The bytes of
 * the 64-bit firmware value are copied in reverse order (index 5 - i).
 * An invalid address is only warned about, not treated as an error.
 * Returns 0 on success, -EIO if the firmware query fails.
 */
static int
qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
{
	int i;
	unsigned char *p;
	u64 mac_addr;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	if (qlcnic_get_mac_addr(adapter, &mac_addr) != 0)
		return -EIO;

	p = (unsigned char *)&mac_addr;
	for (i = 0; i < 6; i++)
		netdev->dev_addr[i] = *(p + 5 - i);

	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
	memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);

	/* set station address */

	if (!is_valid_ether_addr(netdev->perm_addr))
		dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
				netdev->dev_addr);

	return 0;
}
382
/*
 * .ndo_set_mac_address handler.  Validates the new address, quiesces the
 * interface if it is running, stores the address and re-programs the
 * filters via qlcnic_set_multi(), then brings the interface back.
 */
static int qlcnic_set_mac(struct net_device *netdev, void *p)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	if (netif_running(netdev)) {
		netif_device_detach(netdev);
		qlcnic_napi_disable(adapter);
	}

	memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	/* Re-program filters so the hardware picks up the new address. */
	qlcnic_set_multi(adapter->netdev);

	if (netif_running(netdev)) {
		netif_device_attach(netdev);
		qlcnic_napi_enable(adapter);
	}
	return 0;
}
406
/* net_device_ops: entry points the network stack invokes on this device. */
static const struct net_device_ops qlcnic_netdev_ops = {
	.ndo_open = qlcnic_open,
	.ndo_stop = qlcnic_close,
	.ndo_start_xmit = qlcnic_xmit_frame,
	.ndo_get_stats = qlcnic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = qlcnic_set_multi,
	.ndo_set_mac_address = qlcnic_set_mac,
	.ndo_change_mtu = qlcnic_change_mtu,
	.ndo_tx_timeout = qlcnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = qlcnic_poll_controller,
#endif
};
421
/*
 * Select and configure the interrupt mode, in order of preference:
 * MSI-X (up to MSIX_ENTRIES_PER_ADAPTER vectors when RSS is supported),
 * then MSI, then legacy INTx.  Also caches the legacy-interrupt CRB
 * register addresses for this PCI function.  max_sds_rings is raised
 * above 1 only when MSI-X with RSS succeeds.
 */
static void
qlcnic_setup_intr(struct qlcnic_adapter *adapter)
{
	const struct qlcnic_legacy_intr_set *legacy_intrp;
	struct pci_dev *pdev = adapter->pdev;
	int err, num_msix;

	if (adapter->rss_supported) {
		num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
			MSIX_ENTRIES_PER_ADAPTER : 2;
	} else
		num_msix = 1;

	adapter->max_sds_rings = 1;

	adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);

	/* Cache legacy-interrupt register addresses; needed even in MSI
	 * modes since some are reused (e.g. tgt_status_reg for MSI). */
	legacy_intrp = &legacy_intr[adapter->ahw.pci_func];

	adapter->int_vec_bit = legacy_intrp->int_vec_bit;
	adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
			legacy_intrp->tgt_status_reg);
	adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
			legacy_intrp->tgt_mask_reg);
	adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);

	adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
			ISR_INT_STATE_REG);

	qlcnic_set_msix_bit(pdev, 0);

	if (adapter->msix_supported) {

		qlcnic_init_msix_entries(adapter, num_msix);
		err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
		if (err == 0) {
			adapter->flags |= QLCNIC_MSIX_ENABLED;
			qlcnic_set_msix_bit(pdev, 1);

			if (adapter->rss_supported)
				adapter->max_sds_rings = num_msix;

			dev_info(&pdev->dev, "using msi-x interrupts\n");
			return;
		}

		/* err > 0: fewer vectors available than requested; give
		 * them back and fall back to MSI. */
		if (err > 0)
			pci_disable_msix(pdev);

		/* fall through for msi */
	}

	if (use_msi && !pci_enable_msi(pdev)) {
		adapter->flags |= QLCNIC_MSI_ENABLED;
		adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
				msi_tgt_status[adapter->ahw.pci_func]);
		dev_info(&pdev->dev, "using msi interrupts\n");
		adapter->msix_entries[0].vector = pdev->irq;
		return;
	}

	dev_info(&pdev->dev, "using legacy interrupts\n");
	adapter->msix_entries[0].vector = pdev->irq;
}
486
487static void
488qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
489{
490 if (adapter->flags & QLCNIC_MSIX_ENABLED)
491 pci_disable_msix(adapter->pdev);
492 if (adapter->flags & QLCNIC_MSI_ENABLED)
493 pci_disable_msi(adapter->pdev);
494}
495
/* Unmap BAR 0 if qlcnic_setup_pci_map() mapped it. */
static void
qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
{
	if (adapter->ahw.pci_base0 != NULL)
		iounmap(adapter->ahw.pci_base0);
}
502
/*
 * Map PCI BAR 0 (only the 2MB layout is supported) and record its kernel
 * virtual address and length in the adapter's hardware context; also
 * caches the per-function OCM window CRB address.  Returns 0 on success,
 * -EIO if the BAR has an unexpected size or cannot be mapped.
 */
static int
qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
{
	void __iomem *mem_ptr0 = NULL;
	resource_size_t mem_base;
	unsigned long mem_len, pci_len0 = 0;

	struct pci_dev *pdev = adapter->pdev;
	int pci_func = adapter->ahw.pci_func;

	/*
	 * Set the CRB window to invalid. If any register in window 0 is
	 * accessed it should set the window to 0 and then reset it to 1.
	 */
	adapter->ahw.crb_win = -1;
	adapter->ahw.ocm_win = -1;

	/* remap phys address */
	mem_base = pci_resource_start(pdev, 0);	/* 0 is for BAR 0 */
	mem_len = pci_resource_len(pdev, 0);

	if (mem_len == QLCNIC_PCI_2MB_SIZE) {

		mem_ptr0 = pci_ioremap_bar(pdev, 0);
		if (mem_ptr0 == NULL) {
			dev_err(&pdev->dev, "failed to map PCI bar 0\n");
			return -EIO;
		}
		pci_len0 = mem_len;
	} else {
		/* Any other BAR size is an unsupported layout. */
		return -EIO;
	}

	dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));

	adapter->ahw.pci_base0 = mem_ptr0;
	adapter->ahw.pci_len0 = pci_len0;

	adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
		QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));

	return 0;
}
546
547static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
548{
549 struct pci_dev *pdev = adapter->pdev;
550 int i, found = 0;
551
552 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
553 if (qlcnic_boards[i].vendor == pdev->vendor &&
554 qlcnic_boards[i].device == pdev->device &&
555 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
556 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
557 strcpy(name, qlcnic_boards[i].short_name);
558 found = 1;
559 break;
560 }
561
562 }
563
564 if (!found)
565 name = "Unknown";
566}
567
/*
 * Read board/firmware information and derive driver options: firmware
 * version (with a minimum-supported check that sets driver_mismatch),
 * cut-through mode, firmware capability flags, Rx descriptor counts by
 * port type, and MSI-X/RSS support from the use_msi_x parameter.
 */
static void
qlcnic_check_options(struct qlcnic_adapter *adapter)
{
	u32 fw_major, fw_minor, fw_build;
	char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
	char serial_num[32];
	int i, offset, val;
	int *ptr32;
	struct pci_dev *pdev = adapter->pdev;

	adapter->driver_mismatch = 0;

	/* Read the 32-byte board serial number from flash one dword at a
	 * time; any read failure flags a mismatch and aborts. */
	ptr32 = (int *)&serial_num;
	offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
	for (i = 0; i < 8; i++) {
		if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
			dev_err(&pdev->dev, "error reading board info\n");
			adapter->driver_mismatch = 1;
			return;
		}
		ptr32[i] = cpu_to_le32(val);
		offset += sizeof(u32);
	}

	fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
	fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
	fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);

	adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);

	/* Only the first port announces the board identity. */
	if (adapter->portnum == 0) {
		get_brd_name(adapter, brd_name);

		pr_info("%s: %s Board Chip rev 0x%x\n",
				module_name(THIS_MODULE),
				brd_name, adapter->ahw.revision_id);
	}

	/* Firmware older than 3.4.216 is not supported by this driver. */
	if (adapter->fw_version < QLCNIC_VERSION_CODE(3, 4, 216)) {
		adapter->driver_mismatch = 1;
		dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
				fw_major, fw_minor, fw_build);
		return;
	}

	i = QLCRD32(adapter, QLCNIC_SRE_MISC);
	adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;

	dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
			fw_major, fw_minor, fw_build,
			adapter->ahw.cut_through ? "cut-through" : "legacy");

	/* The capability register only exists on firmware >= 4.0.222. */
	if (adapter->fw_version >= QLCNIC_VERSION_CODE(4, 0, 222))
		adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);

	adapter->flags &= ~QLCNIC_LRO_ENABLED;

	/* Size the Rx descriptor pools according to the port speed. */
	if (adapter->ahw.port_type == QLCNIC_XGBE) {
		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
	} else if (adapter->ahw.port_type == QLCNIC_GBE) {
		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
	}

	adapter->msix_supported = !!use_msi_x;
	adapter->rss_supported = !!use_msi_x;

	adapter->num_txd = MAX_CMD_DESCRIPTORS;

	adapter->num_lro_rxd = 0;
	adapter->max_rds_rings = 2;
}
641
/*
 * Bring the firmware to the READY state.  If another function already
 * owns firmware bring-up, just wait for it (wait_init); otherwise do
 * first-boot detection, reset/reload the firmware image when needed,
 * program the port mode, and complete the CMDPEG handshake.
 * Returns 0 on success or a negative errno.
 */
static int
qlcnic_start_firmware(struct qlcnic_adapter *adapter)
{
	int val, err, first_boot;

	err = qlcnic_set_dma_mask(adapter);
	if (err)
		return err;

	if (!qlcnic_can_start_firmware(adapter))
		goto wait_init;

	first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
	if (first_boot == 0x55555555)
		/* This is the first boot after power up */
		QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);

	qlcnic_request_firmware(adapter);

	/* < 0: error; 0: firmware healthy, skip reload; > 0: reload. */
	err = qlcnic_need_fw_reset(adapter);
	if (err < 0)
		goto err_out;
	if (err == 0)
		goto wait_init;

	if (first_boot != 0x55555555) {
		QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
		qlcnic_pinit_from_rom(adapter);
		msleep(1);
	}

	QLCWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
	QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
	QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);

	qlcnic_set_port_mode(adapter);

	err = qlcnic_load_firmware(adapter);
	if (err)
		goto err_out;

	qlcnic_release_firmware(adapter);

	/* Advertise the driver version to the firmware. */
	val = (_QLCNIC_LINUX_MAJOR << 16)
		| ((_QLCNIC_LINUX_MINOR << 8))
		| (_QLCNIC_LINUX_SUBVERSION);
	QLCWR32(adapter, CRB_DRIVER_VERSION, val);

wait_init:
	/* Handshake with the card before we register the devices. */
	err = qlcnic_phantom_init(adapter);
	if (err)
		goto err_out;

	QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);

	qlcnic_update_dma_mask(adapter);

	qlcnic_check_options(adapter);

	adapter->need_fw_reset = 0;

	/* fall through and release firmware */

err_out:
	qlcnic_release_firmware(adapter);
	return err;
}
710
711static int
712qlcnic_request_irq(struct qlcnic_adapter *adapter)
713{
714 irq_handler_t handler;
715 struct qlcnic_host_sds_ring *sds_ring;
716 int err, ring;
717
718 unsigned long flags = 0;
719 struct net_device *netdev = adapter->netdev;
720 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
721
722 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
723 handler = qlcnic_tmp_intr;
724 if (!QLCNIC_IS_MSI_FAMILY(adapter))
725 flags |= IRQF_SHARED;
726
727 } else {
728 if (adapter->flags & QLCNIC_MSIX_ENABLED)
729 handler = qlcnic_msix_intr;
730 else if (adapter->flags & QLCNIC_MSI_ENABLED)
731 handler = qlcnic_msi_intr;
732 else {
733 flags |= IRQF_SHARED;
734 handler = qlcnic_intr;
735 }
736 }
737 adapter->irq = netdev->irq;
738
739 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
740 sds_ring = &recv_ctx->sds_rings[ring];
741 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
742 err = request_irq(sds_ring->irq, handler,
743 flags, sds_ring->name, sds_ring);
744 if (err)
745 return err;
746 }
747
748 return 0;
749}
750
751static void
752qlcnic_free_irq(struct qlcnic_adapter *adapter)
753{
754 int ring;
755 struct qlcnic_host_sds_ring *sds_ring;
756
757 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
758
759 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
760 sds_ring = &recv_ctx->sds_rings[ring];
761 free_irq(sds_ring->irq, sds_ring);
762 }
763}
764
/* Load the default interrupt-coalescing parameters into adapter->coal. */
static void
qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
{
	adapter->coal.flags = QLCNIC_INTR_DEFAULT;
	adapter->coal.normal.data.rx_time_us =
		QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
	adapter->coal.normal.data.rx_packets =
		QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
	adapter->coal.normal.data.tx_time_us =
		QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
	adapter->coal.normal.data.tx_packets =
		QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
}
778
/*
 * Core bring-up: program filters/MTU/RSS/coalescing, enable NAPI and
 * request link events, then mark the device UP.  Returns -EIO when the
 * adapter has not been attached (is_up magic not set).
 */
static int
__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return -EIO;

	qlcnic_set_multi(netdev);
	qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);

	adapter->ahw.linkup = 0;

	/* RSS only makes sense with more than one status ring. */
	if (adapter->max_sds_rings > 1)
		qlcnic_config_rss(adapter, 1);

	qlcnic_config_intr_coalesce(adapter);

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
		qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);

	qlcnic_napi_enable(adapter);

	qlcnic_linkevent_request(adapter, 1);

	set_bit(__QLCNIC_DEV_UP, &adapter->state);
	return 0;
}
805
/*
 * Bring the interface up under the RTNL lock.
 * Usage: resume and firmware-recovery paths.
 */
static int
qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int rc = 0;

	rtnl_lock();
	if (netif_running(netdev))
		rc = __qlcnic_up(adapter, netdev);
	rtnl_unlock();

	return rc;
}
820
/*
 * Core tear-down: clear the UP bit, stop the Tx path under
 * tx_clean_lock, drop filters back to non-promiscuous, disable NAPI and
 * release pending Tx buffers.  A no-op when the adapter was never
 * attached or is already down.
 */
static void
__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
		return;

	/* Order the UP-bit clear against the Tx shutdown that follows. */
	smp_mb();
	spin_lock(&adapter->tx_clean_lock);
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	qlcnic_free_mac_list(adapter);

	qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);

	qlcnic_napi_disable(adapter);

	qlcnic_release_tx_buffers(adapter);
	spin_unlock(&adapter->tx_clean_lock);
}
844
/*
 * Take the interface down under the RTNL lock.
 * Usage: suspend and firmware-recovery paths.
 */
static void
qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	rtnl_lock();
	if (netif_running(netdev))
		__qlcnic_down(adapter, netdev);
	rtnl_unlock();
}
856
/*
 * Allocate everything needed for traffic: firmware context, NAPI, SW/HW
 * ring resources, initial Rx buffers, IRQs and coalescing defaults, then
 * mark the adapter attached.  Idempotent once attached.  Returns 0 or a
 * negative errno; later failures unwind the HW/SW allocations.
 *
 * NOTE(review): the error paths do not undo qlcnic_napi_add() (no
 * qlcnic_napi_del() call), so the SDS ring array appears to leak on a
 * failed attach -- verify against qlcnic_detach() usage.
 */
static int
qlcnic_attach(struct qlcnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err, ring;
	struct qlcnic_host_rds_ring *rds_ring;

	if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
		return 0;

	err = qlcnic_init_firmware(adapter);
	if (err)
		return err;

	err = qlcnic_napi_add(adapter, netdev);
	if (err)
		return err;

	err = qlcnic_alloc_sw_resources(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error in setting sw resources\n");
		return err;
	}

	err = qlcnic_alloc_hw_resources(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error in setting hw resources\n");
		goto err_out_free_sw;
	}


	/* Pre-fill every Rx descriptor ring with receive buffers. */
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx.rds_rings[ring];
		qlcnic_post_rx_buffers(adapter, ring, rds_ring);
	}

	err = qlcnic_request_irq(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to setup interrupt\n");
		goto err_out_free_rxbuf;
	}

	qlcnic_init_coalesce_defaults(adapter);

	qlcnic_create_sysfs_entries(adapter);

	adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
	return 0;

err_out_free_rxbuf:
	qlcnic_release_rx_buffers(adapter);
	qlcnic_free_hw_resources(adapter);
err_out_free_sw:
	qlcnic_free_sw_resources(adapter);
	return err;
}
914
/* Reverse of qlcnic_attach(): release sysfs entries, HW/SW ring
 * resources, Rx buffers, IRQs and NAPI, then clear the attached magic. */
static void
qlcnic_detach(struct qlcnic_adapter *adapter)
{
	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	qlcnic_remove_sysfs_entries(adapter);

	qlcnic_free_hw_resources(adapter);
	qlcnic_release_rx_buffers(adapter);
	qlcnic_free_irq(adapter);
	qlcnic_napi_del(adapter);
	qlcnic_free_sw_resources(adapter);

	adapter->is_up = 0;
}
931
/*
 * End a diagnostic session: mask diag interrupts (interrupt test only),
 * detach, restore the caller-saved ring count, then re-attach and bring
 * the interface back up.  Paired with qlcnic_diag_alloc_res().
 */
void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_sds_ring *sds_ring;
	int ring;

	if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
		for (ring = 0; ring < adapter->max_sds_rings; ring++) {
			sds_ring = &adapter->recv_ctx.sds_rings[ring];
			qlcnic_disable_int(sds_ring);
		}
	}

	qlcnic_detach(adapter);

	adapter->diag_test = 0;
	adapter->max_sds_rings = max_sds_rings;

	/* If re-attach fails the device stays detached; nothing more can
	 * be done here. */
	if (qlcnic_attach(adapter))
		return;

	if (netif_running(netdev))
		__qlcnic_up(adapter, netdev);

	netif_device_attach(netdev);
}
958
/*
 * Start a diagnostic session: take the interface down, re-attach with a
 * single SDS ring and diag_test set, and (for the interrupt test) unmask
 * the ring interrupts.  Paired with qlcnic_diag_free_res().
 * Returns 0 or the qlcnic_attach() error.
 */
int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_sds_ring *sds_ring;
	int ring;
	int ret;

	netif_device_detach(netdev);

	if (netif_running(netdev))
		__qlcnic_down(adapter, netdev);

	qlcnic_detach(adapter);

	/* Diagnostics run on a single ring. */
	adapter->max_sds_rings = 1;
	adapter->diag_test = test;

	ret = qlcnic_attach(adapter);
	if (ret)
		return ret;

	if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
		for (ring = 0; ring < adapter->max_sds_rings; ring++) {
			sds_ring = &adapter->recv_ctx.sds_rings[ring];
			qlcnic_enable_int(sds_ring);
		}
	}

	return 0;
}
989
/*
 * Recreate the device context (full down/detach/attach/up cycle) under
 * the __QLCNIC_RESETTING guard bit.  Returns -EBUSY if a reset is
 * already in progress, otherwise the attach/up result.
 */
int
qlcnic_reset_context(struct qlcnic_adapter *adapter)
{
	int err = 0;
	struct net_device *netdev = adapter->netdev;

	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		return -EBUSY;

	if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {

		netif_device_detach(netdev);

		if (netif_running(netdev))
			__qlcnic_down(adapter, netdev);

		qlcnic_detach(adapter);

		if (netif_running(netdev)) {
			err = qlcnic_attach(adapter);
			if (!err)
				err = __qlcnic_up(adapter, netdev);

			/* On failure the netdev stays detached. */
			if (err)
				goto done;
		}

		netif_device_attach(netdev);
	}

done:
	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return err;
}
1024
/*
 * Finish net_device initialization: install netdev/ethtool ops, select
 * feature flags from firmware capabilities and the DMA mode, read the
 * MAC address, and register the device.  Returns 0 or the
 * register_netdev() error.
 */
static int
qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
		struct net_device *netdev)
{
	int err;
	struct pci_dev *pdev = adapter->pdev;

	adapter->rx_csum = 1;
	adapter->mc_enabled = 0;
	/* Multicast filter capacity -- presumably the hardware table
	 * size; TODO confirm against qlcnic_set_multi(). */
	adapter->max_mc_count = 38;

	netdev->netdev_ops = &qlcnic_netdev_ops;
	netdev->watchdog_timeo = 2*HZ;

	qlcnic_change_mtu(netdev, netdev->mtu);

	SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);

	netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
	netdev->features |= (NETIF_F_GRO);
	netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);

	netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
	netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);

	if (adapter->pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
		netdev->features |= (NETIF_F_HW_VLAN_TX);

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
		netdev->features |= NETIF_F_LRO;

	netdev->irq = adapter->msix_entries[0].vector;

	INIT_WORK(&adapter->tx_timeout_task, qlcnic_tx_timeout_task);

	/* A failed MAC read is only warned about; registration proceeds. */
	if (qlcnic_read_mac_addr(adapter))
		dev_warn(&pdev->dev, "failed to read mac addr\n");

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "failed to register net device\n");
		return err;
	}

	return 0;
}
1079
/*
 * PCI probe: enable the device, map BAR 0, read board info, start the
 * firmware, pick an interrupt mode, register the netdev, and schedule
 * the periodic firmware poll work.  The error labels unwind in reverse
 * order of acquisition.
 */
static int __devinit
qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	struct qlcnic_adapter *adapter = NULL;
	int err;
	int pci_func_id = PCI_FUNC(pdev->devfn);
	uint8_t revision_id;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* BAR 0 must be a memory resource (it is ioremapped later). */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, qlcnic_driver_name);
	if (err)
		goto err_out_disable_pdev;

	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
	if (!netdev) {
		dev_err(&pdev->dev, "failed to allocate net_device\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ahw.pci_func = pci_func_id;

	revision_id = pdev->revision;
	adapter->ahw.revision_id = revision_id;

	rwlock_init(&adapter->ahw.crb_lock);
	mutex_init(&adapter->ahw.mem_lock);

	spin_lock_init(&adapter->tx_clean_lock);
	INIT_LIST_HEAD(&adapter->mac_list);

	err = qlcnic_setup_pci_map(adapter);
	if (err)
		goto err_out_free_netdev;

	/* This will be reset for mezz cards */
	adapter->portnum = pci_func_id;

	err = qlcnic_get_board_info(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error getting board config info.\n");
		goto err_out_iounmap;
	}


	err = qlcnic_start_firmware(adapter);
	if (err)
		goto err_out_decr_ref;

	/*
	 * See if the firmware gave us a virtual-physical port mapping.
	 */
	adapter->physical_port = adapter->portnum;

	qlcnic_clear_stats(adapter);

	qlcnic_setup_intr(adapter);

	err = qlcnic_setup_netdev(adapter, netdev);
	if (err)
		goto err_out_disable_msi;

	pci_set_drvdata(pdev, adapter);

	/* Start periodic firmware health/state monitoring. */
	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);

	switch (adapter->ahw.port_type) {
	case QLCNIC_GBE:
		dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
				adapter->netdev->name);
		break;
	case QLCNIC_XGBE:
		dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
				adapter->netdev->name);
		break;
	}

	qlcnic_create_diag_entries(adapter);

	return 0;

err_out_disable_msi:
	qlcnic_teardown_intr(adapter);

err_out_decr_ref:
	qlcnic_clr_all_drv_state(adapter);

err_out_iounmap:
	qlcnic_cleanup_pci_map(adapter);

err_out_free_netdev:
	free_netdev(netdev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	return err;
}
1197
/*
 * qlcnic_remove - PCI remove entry point.
 *
 * Tears down in reverse of probe: stop the fw poll work first so no
 * delayed work races with teardown, unregister the netdev, then detach
 * rings/interrupts, drop the driver's CRB state bits, and release PCI
 * resources.  free_netdev() last, since it frees the adapter itself.
 */
static void __devexit qlcnic_remove(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter;
	struct net_device *netdev;

	adapter = pci_get_drvdata(pdev);
	if (adapter == NULL)
		return;

	netdev = adapter->netdev;

	/* Also takes __QLCNIC_RESETTING, serializing against reset paths. */
	qlcnic_cancel_fw_work(adapter);

	unregister_netdev(netdev);

	cancel_work_sync(&adapter->tx_timeout_task);

	qlcnic_detach(adapter);

	qlcnic_clr_all_drv_state(adapter);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);

	qlcnic_teardown_intr(adapter);

	qlcnic_remove_diag_entries(adapter);

	qlcnic_cleanup_pci_map(adapter);

	qlcnic_release_firmware(adapter);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	/* Frees the embedded qlcnic_adapter; must be the final step. */
	free_netdev(netdev);
}
/*
 * __qlcnic_shutdown - common quiesce path for shutdown and suspend.
 *
 * Detaches the netdev, stops fw polling, brings the interface down if
 * running, detaches resources, clears driver state in the CRB, saves PCI
 * config state and optionally arms wake-on-LAN.  Returns 0 on success or
 * the pci_save_state() error.
 */
static int __qlcnic_shutdown(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int retval;

	netif_device_detach(netdev);

	/* Sets __QLCNIC_RESETTING, blocking concurrent reset work. */
	qlcnic_cancel_fw_work(adapter);

	if (netif_running(netdev))
		qlcnic_down(adapter, netdev);

	cancel_work_sync(&adapter->tx_timeout_task);

	qlcnic_detach(adapter);

	qlcnic_clr_all_drv_state(adapter);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	if (qlcnic_wol_supported(adapter)) {
		pci_enable_wake(pdev, PCI_D3cold, 1);
		pci_enable_wake(pdev, PCI_D3hot, 1);
	}

	return 0;
}
1267
/* PCI shutdown hook: quiesce the device, then power down the PCI function. */
static void qlcnic_shutdown(struct pci_dev *pdev)
{
	int err;

	/* Only disable the device if the quiesce step fully succeeded. */
	err = __qlcnic_shutdown(pdev);
	if (err == 0)
		pci_disable_device(pdev);
}
1275
1276#ifdef CONFIG_PM
1277static int
1278qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1279{
1280 int retval;
1281
1282 retval = __qlcnic_shutdown(pdev);
1283 if (retval)
1284 return retval;
1285
1286 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1287 return 0;
1288}
1289
/*
 * qlcnic_resume - PM resume hook.
 *
 * Re-enables the PCI function, restores config space, restarts firmware,
 * and — if the interface was running — re-attaches and brings it back up.
 * Resumes the periodic fw poll work on success.  Returns 0 or -errno;
 * on failure the driver state bits are cleared.
 */
static int
qlcnic_resume(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_power_state(pdev, PCI_D0);
	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Invalidate cached CRB/OCM window selections; hw state is stale. */
	adapter->ahw.crb_win = -1;
	adapter->ahw.ocm_win = -1;

	err = qlcnic_start_firmware(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to start firmware\n");
		return err;
	}

	if (netif_running(netdev)) {
		err = qlcnic_attach(adapter);
		if (err)
			goto err_out;

		err = qlcnic_up(adapter, netdev);
		if (err)
			goto err_out_detach;


		qlcnic_config_indev_addr(netdev, NETDEV_UP);
	}

	netif_device_attach(netdev);
	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
	return 0;

err_out_detach:
	qlcnic_detach(adapter);
err_out:
	qlcnic_clr_all_drv_state(adapter);
	return err;
}
1337#endif
1338
1339static int qlcnic_open(struct net_device *netdev)
1340{
1341 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1342 int err;
1343
1344 if (adapter->driver_mismatch)
1345 return -EIO;
1346
1347 err = qlcnic_attach(adapter);
1348 if (err)
1349 return err;
1350
1351 err = __qlcnic_up(adapter, netdev);
1352 if (err)
1353 goto err_out;
1354
1355 netif_start_queue(netdev);
1356
1357 return 0;
1358
1359err_out:
1360 qlcnic_detach(adapter);
1361 return err;
1362}
1363
1364/*
1365 * qlcnic_close - Disables a network interface entry point
1366 */
1367static int qlcnic_close(struct net_device *netdev)
1368{
1369 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1370
1371 __qlcnic_down(adapter, netdev);
1372 return 0;
1373}
1374
/*
 * qlcnic_tso_check - finalize offload fields of a TX descriptor.
 *
 * Decides the hardware opcode (plain, checksum-offload, or LSO/LSO6),
 * fills VLAN and header-offset fields in @first_desc, and — for TSO —
 * copies the packet headers into extra ring descriptors so firmware has
 * a header template.  Advances tx_ring->producer past any header
 * descriptors it consumes.
 */
static void
qlcnic_tso_check(struct net_device *netdev,
		struct qlcnic_host_tx_ring *tx_ring,
		struct cmd_desc_type0 *first_desc,
		struct sk_buff *skb)
{
	u8 opcode = TX_ETHER_PKT;
	__be16 protocol = skb->protocol;
	u16 flags = 0, vid = 0;
	u32 producer;
	int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
	struct cmd_desc_type0 *hwdesc;
	struct vlan_ethhdr *vh;

	if (protocol == cpu_to_be16(ETH_P_8021Q)) {

		/* Tag already in the frame data: look through it. */
		vh = (struct vlan_ethhdr *)skb->data;
		protocol = vh->h_vlan_encapsulated_proto;
		flags = FLAGS_VLAN_TAGGED;

	} else if (vlan_tx_tag_present(skb)) {

		/* Out-of-band tag from the stack: pass TCI via descriptor. */
		flags = FLAGS_VLAN_OOB;
		vid = vlan_tx_tag_get(skb);
		qlcnic_set_tx_vlan_tci(first_desc, vid);
		vlan_oob = 1;
	}

	if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
			skb_shinfo(skb)->gso_size > 0) {

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->total_hdr_length = hdr_len;
		if (vlan_oob) {
			/* Header template below will carry an inserted tag,
			 * so all offsets shift by VLAN_HLEN. */
			first_desc->total_hdr_length += VLAN_HLEN;
			first_desc->tcp_hdr_offset = VLAN_HLEN;
			first_desc->ip_hdr_offset = VLAN_HLEN;
			/* Only in case of TSO on vlan device */
			flags |= FLAGS_VLAN_TAGGED;
		}

		opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
				TX_TCP_LSO6 : TX_TCP_LSO;
		tso = 1;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4proto;

		if (protocol == cpu_to_be16(ETH_P_IP)) {
			l4proto = ip_hdr(skb)->protocol;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCP_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDP_PKT;
		} else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
			l4proto = ipv6_hdr(skb)->nexthdr;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCPV6_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDPV6_PKT;
		}
	}

	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
	first_desc->ip_hdr_offset += skb_network_offset(skb);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	if (!tso)
		return;

	/* For LSO, we need to copy the MAC/IP/TCP headers into
	 * the descriptor ring
	 */
	producer = tx_ring->producer;
	copied = 0;
	offset = 2;	/* first 2 bytes of a header descriptor are skipped */

	if (vlan_oob) {
		/* Create a TSO vlan header template for firmware */

		hwdesc = &tx_ring->desc_head[producer];
		tx_ring->cmd_buf_arr[producer].skb = NULL;

		copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
				hdr_len + VLAN_HLEN);

		/* Rebuild the Ethernet header with an inserted 802.1Q tag:
		 * 12 bytes of dst/src MAC, 4-byte tag, then the rest. */
		vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
		skb_copy_from_linear_data(skb, vh, 12);
		vh->h_vlan_proto = htons(ETH_P_8021Q);
		vh->h_vlan_TCI = htons(vid);
		skb_copy_from_linear_data_offset(skb, 12,
				(char *)vh + 16, copy_len - 16);

		copied = copy_len - VLAN_HLEN;
		offset = 0;

		producer = get_next_index(producer, tx_ring->num_desc);
	}

	while (copied < hdr_len) {

		copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
				(hdr_len - copied));

		hwdesc = &tx_ring->desc_head[producer];
		tx_ring->cmd_buf_arr[producer].skb = NULL;

		skb_copy_from_linear_data_offset(skb, copied,
				(char *)hwdesc + offset, copy_len);

		copied += copy_len;
		offset = 0;

		producer = get_next_index(producer, tx_ring->num_desc);
	}

	tx_ring->producer = producer;
	/* Ensure descriptor writes are visible before the producer update
	 * is published to hardware by the caller. */
	barrier();
}
1498
/*
 * qlcnic_map_tx_skb - DMA-map an skb (head + page fragments) for TX.
 *
 * Fills pbuf->frag_array[0] with the linear-data mapping and [1..n] with
 * the page-fragment mappings.  On any mapping failure, unmaps everything
 * mapped so far (in reverse) and returns -ENOMEM; returns 0 on success.
 */
static int
qlcnic_map_tx_skb(struct pci_dev *pdev,
		struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf;
	struct skb_frag_struct *frag;
	int i, nr_frags;
	dma_addr_t map;

	nr_frags = skb_shinfo(skb)->nr_frags;
	nf = &pbuf->frag_array[0];

	map = pci_map_single(pdev, skb->data,
			skb_headlen(skb), PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, map))
		goto out_err;

	nf->dma = map;
	nf->length = skb_headlen(skb);

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		nf = &pbuf->frag_array[i+1];

		map = pci_map_page(pdev, frag->page, frag->page_offset,
				frag->size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, map))
			goto unwind;

		nf->dma = map;
		nf->length = frag->size;
	}

	return 0;

unwind:
	/* Unmap fragments 0..i-1 (slots 1..i), then the head mapping. */
	while (--i >= 0) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);

out_err:
	return -ENOMEM;
}
1546
/* Zero 64-bit words 0 and 2 of a TX command descriptor.  Word 1 is
 * deliberately left untouched — presumably it is always overwritten by
 * the caller before use (TODO confirm against cmd_desc_type0 layout). */
static inline void
qlcnic_clear_cmddesc(u64 *desc)
{
	desc[0] = 0ULL;
	desc[2] = 0ULL;
}
1553
/*
 * qlcnic_xmit_frame - ndo_start_xmit handler.
 *
 * DMA-maps the skb, fills TX command descriptors (up to 4 buffer
 * addresses per descriptor), applies TSO/checksum offload fields, and
 * rings the doorbell.  Stops the queue and returns NETDEV_TX_BUSY if the
 * ring lacks room; drops the packet (still NETDEV_TX_OK) if DMA mapping
 * fails.
 */
netdev_tx_t
qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	struct qlcnic_cmd_buffer *pbuf;
	struct qlcnic_skb_frag *buffrag;
	struct cmd_desc_type0 *hwdesc, *first_desc;
	struct pci_dev *pdev;
	int i, k;

	u32 producer;
	int frag_count, no_of_desc;
	u32 num_txd = tx_ring->num_desc;

	frag_count = skb_shinfo(skb)->nr_frags + 1;

	/* 4 fragments per cmd des */
	no_of_desc = (frag_count + 3) >> 2;

	/* +2 headroom: TSO header-template descriptors may be needed. */
	if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	producer = tx_ring->producer;
	pbuf = &tx_ring->cmd_buf_arr[producer];

	pdev = adapter->pdev;

	if (qlcnic_map_tx_skb(pdev, skb, pbuf))
		goto drop_packet;

	pbuf->skb = skb;
	pbuf->frag_count = frag_count;

	first_desc = hwdesc = &tx_ring->desc_head[producer];
	qlcnic_clear_cmddesc((u64 *)hwdesc);

	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
	qlcnic_set_tx_port(first_desc, adapter->portnum);

	for (i = 0; i < frag_count; i++) {

		k = i % 4;

		if ((k == 0) && (i > 0)) {
			/* move to next desc.*/
			producer = get_next_index(producer, num_txd);
			hwdesc = &tx_ring->desc_head[producer];
			qlcnic_clear_cmddesc((u64 *)hwdesc);
			tx_ring->cmd_buf_arr[producer].skb = NULL;
		}

		buffrag = &pbuf->frag_array[i];

		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
		switch (k) {
		case 0:
			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
			break;
		case 1:
			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
			break;
		case 2:
			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
			break;
		case 3:
			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
			break;
		}
	}

	tx_ring->producer = get_next_index(producer, num_txd);

	/* May consume more descriptors (TSO header template) and advance
	 * the producer further before the doorbell. */
	qlcnic_tso_check(netdev, tx_ring, first_desc, skb);

	qlcnic_update_cmd_producer(adapter, tx_ring);

	adapter->stats.txbytes += skb->len;
	adapter->stats.xmitcalled++;

	return NETDEV_TX_OK;

drop_packet:
	adapter->stats.txdropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
1643
1644static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
1645{
1646 struct net_device *netdev = adapter->netdev;
1647 u32 temp, temp_state, temp_val;
1648 int rv = 0;
1649
1650 temp = QLCRD32(adapter, CRB_TEMP_STATE);
1651
1652 temp_state = qlcnic_get_temp_state(temp);
1653 temp_val = qlcnic_get_temp_val(temp);
1654
1655 if (temp_state == QLCNIC_TEMP_PANIC) {
1656 dev_err(&netdev->dev,
1657 "Device temperature %d degrees C exceeds"
1658 " maximum allowed. Hardware has been shut down.\n",
1659 temp_val);
1660 rv = 1;
1661 } else if (temp_state == QLCNIC_TEMP_WARN) {
1662 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
1663 dev_err(&netdev->dev,
1664 "Device temperature %d degrees C "
1665 "exceeds operating range."
1666 " Immediate action needed.\n",
1667 temp_val);
1668 }
1669 } else {
1670 if (adapter->temp == QLCNIC_TEMP_WARN) {
1671 dev_info(&netdev->dev,
1672 "Device temperature is now %d degrees C"
1673 " in normal range.\n", temp_val);
1674 }
1675 }
1676 adapter->temp = temp_state;
1677 return rv;
1678}
1679
1680void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
1681{
1682 struct net_device *netdev = adapter->netdev;
1683
1684 if (adapter->ahw.linkup && !linkup) {
1685 dev_info(&netdev->dev, "NIC Link is down\n");
1686 adapter->ahw.linkup = 0;
1687 if (netif_running(netdev)) {
1688 netif_carrier_off(netdev);
1689 netif_stop_queue(netdev);
1690 }
1691 } else if (!adapter->ahw.linkup && linkup) {
1692 dev_info(&netdev->dev, "NIC Link is up\n");
1693 adapter->ahw.linkup = 1;
1694 if (netif_running(netdev)) {
1695 netif_carrier_on(netdev);
1696 netif_wake_queue(netdev);
1697 }
1698 }
1699}
1700
/*
 * qlcnic_tx_timeout - ndo_tx_timeout handler.
 * Defers recovery to tx_timeout_task; does nothing if a reset is
 * already in progress.
 */
static void qlcnic_tx_timeout(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);

	if (test_bit(__QLCNIC_RESETTING, &adapter->state))
		return;

	dev_err(&netdev->dev, "transmit timeout, resetting.\n");
	schedule_work(&adapter->tx_timeout_task);
}
1711
/*
 * qlcnic_tx_timeout_task - deferred TX-timeout recovery.
 *
 * First tries a lightweight context reset; after QLCNIC_MAX_TX_TIMEOUTS
 * consecutive timeouts (or if the context reset fails) it escalates to a
 * full firmware reset request picked up by the fw poll work.
 */
static void qlcnic_tx_timeout_task(struct work_struct *work)
{
	struct qlcnic_adapter *adapter =
		container_of(work, struct qlcnic_adapter, tx_timeout_task);

	if (!netif_running(adapter->netdev))
		return;

	/* Bail if another reset path already owns the device. */
	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		return;

	if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
		goto request_reset;

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	if (!qlcnic_reset_context(adapter)) {
		adapter->netdev->trans_start = jiffies;
		return;

	}
	/* context reset failed, fall through for fw reset */

request_reset:
	adapter->need_fw_reset = 1;
	clear_bit(__QLCNIC_RESETTING, &adapter->state);
}
1738
1739static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
1740{
1741 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1742 struct net_device_stats *stats = &netdev->stats;
1743
1744 memset(stats, 0, sizeof(*stats));
1745
1746 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
1747 stats->tx_packets = adapter->stats.xmitfinished;
1748 stats->rx_bytes = adapter->stats.rxbytes;
1749 stats->tx_bytes = adapter->stats.txbytes;
1750 stats->rx_dropped = adapter->stats.rxdropped;
1751 stats->tx_dropped = adapter->stats.txdropped;
1752
1753 return stats;
1754}
1755
/*
 * qlcnic_clear_legacy_intr - claim and ack a legacy (INTx) interrupt.
 *
 * Returns IRQ_NONE if the interrupt vector bit for this function is not
 * set or the interrupt state machine does not show a triggered state
 * (shared-IRQ case); otherwise acks the interrupt and returns
 * IRQ_HANDLED.
 */
static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
{
	u32 status;

	status = readl(adapter->isr_int_vec);

	if (!(status & adapter->int_vec_bit))
		return IRQ_NONE;

	/* check interrupt state machine, to be sure */
	status = readl(adapter->crb_int_state_reg);
	if (!ISR_LEGACY_INT_TRIGGERED(status))
		return IRQ_NONE;

	writel(0xffffffff, adapter->tgt_status_reg);
	/* read twice to ensure write is flushed */
	readl(adapter->isr_int_vec);
	readl(adapter->isr_int_vec);

	return IRQ_HANDLED;
}
1777
/*
 * qlcnic_tmp_intr - temporary IRQ handler used during interrupt-test
 * diagnostics.  Acks the interrupt per mode (MSI-X needs no ack, MSI
 * writes the status register, legacy goes through the full claim path),
 * bumps diag_cnt, and re-enables the ring interrupt.
 */
static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
{
	struct qlcnic_host_sds_ring *sds_ring = data;
	struct qlcnic_adapter *adapter = sds_ring->adapter;

	if (adapter->flags & QLCNIC_MSIX_ENABLED)
		goto done;
	else if (adapter->flags & QLCNIC_MSI_ENABLED) {
		writel(0xffffffff, adapter->tgt_status_reg);
		goto done;
	}

	if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
		return IRQ_NONE;

done:
	adapter->diag_cnt++;
	qlcnic_enable_int(sds_ring);
	return IRQ_HANDLED;
}
1798
/*
 * qlcnic_intr - legacy (INTx) interrupt handler.
 * Claims/acks the shared interrupt, then schedules NAPI on the
 * status-descriptor ring passed as @data.
 */
static irqreturn_t qlcnic_intr(int irq, void *data)
{
	struct qlcnic_host_sds_ring *sds_ring = data;
	struct qlcnic_adapter *adapter = sds_ring->adapter;

	if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
		return IRQ_NONE;

	napi_schedule(&sds_ring->napi);

	return IRQ_HANDLED;
}
1811
/*
 * qlcnic_msi_intr - MSI interrupt handler.
 * MSI is not shared, so no claim check is needed: ack the interrupt
 * and schedule NAPI.
 */
static irqreturn_t qlcnic_msi_intr(int irq, void *data)
{
	struct qlcnic_host_sds_ring *sds_ring = data;
	struct qlcnic_adapter *adapter = sds_ring->adapter;

	/* clear interrupt */
	writel(0xffffffff, adapter->tgt_status_reg);

	napi_schedule(&sds_ring->napi);
	return IRQ_HANDLED;
}
1823
/* MSI-X interrupt handler: per-vector, auto-acked — just schedule NAPI. */
static irqreturn_t qlcnic_msix_intr(int irq, void *data)
{
	struct qlcnic_host_sds_ring *sds_ring = data;

	napi_schedule(&sds_ring->napi);
	return IRQ_HANDLED;
}
1831
/*
 * qlcnic_process_cmd_ring - reclaim completed TX descriptors.
 *
 * Unmaps and frees skbs up to the hardware consumer index (bounded by
 * MAX_STATUS_HANDLE per call), wakes the TX queue if enough descriptors
 * were freed, and returns non-zero when the ring is fully drained.
 * Uses trylock: if another CPU is already cleaning, reports "done".
 */
static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
{
	u32 sw_consumer, hw_consumer;
	int count = 0, i;
	struct qlcnic_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_skb_frag *frag;
	int done;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;

	if (!spin_trylock(&adapter->tx_clean_lock))
		return 1;

	sw_consumer = tx_ring->sw_consumer;
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));

	while (sw_consumer != hw_consumer) {
		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
		if (buffer->skb) {
			/* Slot 0 is the linear head; the rest are pages. */
			frag = &buffer->frag_array[0];
			pci_unmap_single(pdev, frag->dma, frag->length,
					 PCI_DMA_TODEVICE);
			frag->dma = 0ULL;
			for (i = 1; i < buffer->frag_count; i++) {
				frag++;
				pci_unmap_page(pdev, frag->dma, frag->length,
					       PCI_DMA_TODEVICE);
				frag->dma = 0ULL;
			}

			adapter->stats.xmitfinished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
		if (++count >= MAX_STATUS_HANDLE)
			break;
	}

	if (count && netif_running(netdev)) {
		tx_ring->sw_consumer = sw_consumer;

		/* Publish sw_consumer before checking queue state to pair
		 * with the stop in the xmit path. */
		smp_mb();

		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
			__netif_tx_lock(tx_ring->txq, smp_processor_id());
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
				netif_wake_queue(netdev);
				adapter->tx_timeo_cnt = 0;
			}
			__netif_tx_unlock(tx_ring->txq);
		}
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
	done = (sw_consumer == hw_consumer);
	spin_unlock(&adapter->tx_clean_lock);

	return done;
}
1906
/*
 * qlcnic_poll - NAPI poll callback.
 *
 * Reclaims TX completions, processes up to @budget RX events, and
 * completes NAPI (re-enabling the ring interrupt if the device is up)
 * only when both RX was under budget and TX cleanup finished.
 */
static int qlcnic_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_sds_ring *sds_ring =
		container_of(napi, struct qlcnic_host_sds_ring, napi);

	struct qlcnic_adapter *adapter = sds_ring->adapter;

	int tx_complete;
	int work_done;

	tx_complete = qlcnic_process_cmd_ring(adapter);

	work_done = qlcnic_process_rcv_ring(sds_ring, budget);

	if ((work_done < budget) && tx_complete) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_int(sds_ring);
	}

	return work_done;
}
1929
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * qlcnic_poll_controller - netpoll hook (netconsole etc.).
 *
 * NOTE(review): qlcnic_intr() casts its @data argument to
 * struct qlcnic_host_sds_ring *, but here the adapter pointer is passed
 * instead.  This looks like a type mismatch that should iterate the SDS
 * rings and pass each ring — verify against the recv-context layout.
 */
static void qlcnic_poll_controller(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	disable_irq(adapter->irq);
	qlcnic_intr(adapter->irq, adapter);
	enable_irq(adapter->irq);
}
#endif
1939
1940static void
1941qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state)
1942{
1943 u32 val;
1944
1945 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
1946 state != QLCNIC_DEV_NEED_QUISCENT);
1947
1948 if (qlcnic_api_lock(adapter))
1949 return ;
1950
1951 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1952
1953 if (state == QLCNIC_DEV_NEED_RESET)
1954 val |= ((u32)0x1 << (adapter->portnum * 4));
1955 else if (state == QLCNIC_DEV_NEED_QUISCENT)
1956 val |= ((u32)0x1 << ((adapter->portnum * 4) + 1));
1957
1958 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1959
1960 qlcnic_api_unlock(adapter);
1961}
1962
1963static int
1964qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
1965{
1966 u32 val;
1967
1968 if (qlcnic_api_lock(adapter))
1969 return -EBUSY;
1970
1971 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1972 val &= ~((u32)0x3 << (adapter->portnum * 4));
1973 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1974
1975 qlcnic_api_unlock(adapter);
1976
1977 return 0;
1978}
1979
/*
 * qlcnic_clr_all_drv_state - drop this function's presence from the
 * shared CRB state.
 *
 * Clears our bit in DEV_REF_COUNT (marking the device COLD if no
 * function remains), clears our DRV_STATE request bits, and resets the
 * local failure counters / state bits.  The counter/bit cleanup runs
 * even if the CRB lock cannot be taken.
 */
static void
qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
{
	u32 val;

	if (qlcnic_api_lock(adapter))
		goto err;

	val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
	val &= ~((u32)0x1 << (adapter->portnum * 4));
	QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);

	/* Last function out marks the device cold. */
	if (!(val & 0x11111111))
		QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);

	val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
	val &= ~((u32)0x3 << (adapter->portnum * 4));
	QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);

	qlcnic_api_unlock(adapter);
err:
	adapter->fw_fail_cnt = 0;
	clear_bit(__QLCNIC_START_FW, &adapter->state);
	clear_bit(__QLCNIC_RESETTING, &adapter->state);
}
2005
2006static int
2007qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2008{
2009 int act, state;
2010
2011 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2012 act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2013
2014 if (((state & 0x11111111) == (act & 0x11111111)) ||
2015 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2016 return 0;
2017 else
2018 return 1;
2019}
2020
/*
 * qlcnic_can_start_firmware - arbitrate firmware initialization among
 * the PCI functions sharing the device.
 *
 * Registers this function in DEV_REF_COUNT, then decides based on the
 * shared device state:
 *   1  -> this function must load/start the firmware (state set to
 *         INITALIZING before returning);
 *   0  -> firmware already running (or became READY after waiting);
 *   -1 -> device FAILED, lock failure, or 20 s wait for READY expired.
 */
static int
qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
{
	u32 val, prev_state;
	int cnt = 0;
	int portnum = adapter->portnum;

	if (qlcnic_api_lock(adapter))
		return -1;

	val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
	if (!(val & ((int)0x1 << (portnum * 4)))) {
		/* First appearance of this function: take a reference. */
		val |= ((u32)0x1 << (portnum * 4));
		QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
	} else if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state)) {
		/* We previously claimed the reset; proceed to start fw. */
		goto start_fw;
	}

	prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);

	switch (prev_state) {
	case QLCNIC_DEV_COLD:
start_fw:
		QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITALIZING);
		qlcnic_api_unlock(adapter);
		return 1;

	case QLCNIC_DEV_READY:
		qlcnic_api_unlock(adapter);
		return 0;

	case QLCNIC_DEV_NEED_RESET:
		/* Acknowledge the pending reset for this function. */
		val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
		val |= ((u32)0x1 << (portnum * 4));
		QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
		break;

	case QLCNIC_DEV_NEED_QUISCENT:
		/* Acknowledge the pending quiesce for this function. */
		val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
		val |= ((u32)0x1 << ((portnum * 4) + 1));
		QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
		break;

	case QLCNIC_DEV_FAILED:
		qlcnic_api_unlock(adapter);
		return -1;
	}

	/* Another function is resetting: wait up to ~20 s for READY. */
	qlcnic_api_unlock(adapter);
	msleep(1000);
	while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY) &&
			++cnt < 20)
		msleep(1000);

	if (cnt >= 20)
		return -1;

	if (qlcnic_api_lock(adapter))
		return -1;

	/* Reset handled elsewhere: clear our acknowledgement bits. */
	val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
	val &= ~((u32)0x3 << (portnum * 4));
	QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);

	qlcnic_api_unlock(adapter);

	return 0;
}
2089
/*
 * qlcnic_fwinit_work - delayed work driving firmware (re)initialization.
 *
 * If this function owns the reset (__QLCNIC_START_FW), waits for all
 * functions to acknowledge, then starts firmware and schedules the
 * attach work.  Otherwise polls the shared device state until it goes
 * READY (start firmware locally) or FAILED.  Gives up after
 * FW_POLL_THRESH iterations and clears all driver state.
 */
static void
qlcnic_fwinit_work(struct work_struct *work)
{
	struct qlcnic_adapter *adapter = container_of(work,
			struct qlcnic_adapter, fw_work.work);
	int dev_state;

	if (++adapter->fw_wait_cnt > FW_POLL_THRESH)
		goto err_ret;

	if (test_bit(__QLCNIC_START_FW, &adapter->state)) {

		/* Not everyone acked yet: poll again later. */
		if (qlcnic_check_drv_state(adapter)) {
			qlcnic_schedule_work(adapter,
					qlcnic_fwinit_work, FW_POLL_DELAY);
			return;
		}

		if (!qlcnic_start_firmware(adapter)) {
			qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
			return;
		}

		goto err_ret;
	}

	dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
	switch (dev_state) {
	case QLCNIC_DEV_READY:
		if (!qlcnic_start_firmware(adapter)) {
			qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
			return;
		}
		/* fall through: firmware start failed, treat as failure */
	case QLCNIC_DEV_FAILED:
		break;

	default:
		qlcnic_schedule_work(adapter,
			qlcnic_fwinit_work, 2 * FW_POLL_DELAY);
		return;
	}

err_ret:
	qlcnic_clr_all_drv_state(adapter);
}
2135
/*
 * qlcnic_detach_work - delayed work that quiesces the device ahead of a
 * firmware reset.
 *
 * Brings the interface down and detaches resources, then — unless the
 * firmware reported a fatal halt status or the temperature hit panic —
 * advertises our reset/quiescent request and hands over to
 * qlcnic_fwinit_work.  On fatal conditions all driver state is cleared
 * and the device is left down.
 */
static void
qlcnic_detach_work(struct work_struct *work)
{
	struct qlcnic_adapter *adapter = container_of(work,
			struct qlcnic_adapter, fw_work.work);
	struct net_device *netdev = adapter->netdev;
	u32 status;

	netif_device_detach(netdev);

	qlcnic_down(adapter, netdev);

	/* qlcnic_detach touches netdev state that requires rtnl. */
	rtnl_lock();
	qlcnic_detach(adapter);
	rtnl_unlock();

	status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);

	if (status & QLCNIC_RCODE_FATAL_ERROR)
		goto err_ret;

	if (adapter->temp == QLCNIC_TEMP_PANIC)
		goto err_ret;

	qlcnic_set_drv_state(adapter, adapter->dev_state);

	adapter->fw_wait_cnt = 0;

	qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);

	return;

err_ret:
	qlcnic_clr_all_drv_state(adapter);

}
2172
2173static void
2174qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2175{
2176 u32 state;
2177
2178 if (qlcnic_api_lock(adapter))
2179 return;
2180
2181 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2182
2183 if (state != QLCNIC_DEV_INITALIZING && state != QLCNIC_DEV_NEED_RESET) {
2184 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
2185 set_bit(__QLCNIC_START_FW, &adapter->state);
2186 }
2187
2188 qlcnic_api_unlock(adapter);
2189}
2190
/*
 * qlcnic_schedule_work - (re)arm the single fw_work delayed-work slot
 * with @func, to run after @delay jiffies (rounded for power saving).
 * Callers serialize via __QLCNIC_RESETTING so only one user of the slot
 * is active at a time.
 */
static void
qlcnic_schedule_work(struct qlcnic_adapter *adapter,
		work_func_t func, int delay)
{
	INIT_DELAYED_WORK(&adapter->fw_work, func);
	schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
}
2198
/*
 * qlcnic_cancel_fw_work - take ownership of the reset path and stop any
 * pending fw work.  Spins until __QLCNIC_RESETTING is acquired; the
 * caller is responsible for clearing the bit when done.
 */
static void
qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
{
	while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		msleep(10);

	cancel_delayed_work_sync(&adapter->fw_work);
}
2207
/*
 * qlcnic_attach_work - delayed work that re-attaches the device after a
 * firmware (re)start.
 *
 * Re-attaches resources and brings the interface back up if it was
 * running, re-arms the fw poll work, and always clears the failure
 * counter and __QLCNIC_RESETTING — even when the attach fails.
 */
static void
qlcnic_attach_work(struct work_struct *work)
{
	struct qlcnic_adapter *adapter = container_of(work,
				struct qlcnic_adapter, fw_work.work);
	struct net_device *netdev = adapter->netdev;
	int err;

	if (netif_running(netdev)) {
		err = qlcnic_attach(adapter);
		if (err)
			goto done;

		err = qlcnic_up(adapter, netdev);
		if (err) {
			qlcnic_detach(adapter);
			goto done;
		}

		qlcnic_config_indev_addr(netdev, NETDEV_UP);
	}

	netif_device_attach(netdev);

done:
	adapter->fw_fail_cnt = 0;
	clear_bit(__QLCNIC_RESETTING, &adapter->state);

	/* Resume health polling once our DRV_STATE bits are cleared. */
	if (!qlcnic_clr_drv_state(adapter))
		qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
							FW_POLL_DELAY);
}
2240
/*
 * qlcnic_check_health - periodic firmware/device health check.
 *
 * Checks temperature, pending reset requests from peers, and the
 * firmware heartbeat counter.  Returns 0 while healthy; returns 1 and
 * (if auto reset is enabled and no reset is in flight) schedules the
 * detach/reset sequence when recovery is needed.
 */
static int
qlcnic_check_health(struct qlcnic_adapter *adapter)
{
	u32 state = 0, heartbit;
	struct net_device *netdev = adapter->netdev;

	if (qlcnic_check_temp(adapter))
		goto detach;

	if (adapter->need_fw_reset) {
		qlcnic_dev_request_reset(adapter);
		goto detach;
	}

	/* A peer function may have requested reset/quiesce. */
	state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
	if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
		adapter->need_fw_reset = 1;

	/* Heartbeat advancing means firmware is alive. */
	heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
	if (heartbit != adapter->heartbit) {
		adapter->heartbit = heartbit;
		adapter->fw_fail_cnt = 0;
		if (adapter->need_fw_reset)
			goto detach;
		return 0;
	}

	if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
		return 0;

	qlcnic_dev_request_reset(adapter);

	clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);

	dev_info(&netdev->dev, "firmware hang detected\n");

detach:
	adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
		QLCNIC_DEV_NEED_RESET;

	if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
			!test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);

	return 1;
}
2287
/*
 * qlcnic_fw_poll_work - periodic health-poll delayed work.
 * Skips the check (but keeps polling) while a reset is in flight; stops
 * rescheduling itself once qlcnic_check_health kicks off recovery.
 */
static void
qlcnic_fw_poll_work(struct work_struct *work)
{
	struct qlcnic_adapter *adapter = container_of(work,
				struct qlcnic_adapter, fw_work.work);

	if (test_bit(__QLCNIC_RESETTING, &adapter->state))
		goto reschedule;


	if (qlcnic_check_health(adapter))
		return;

reschedule:
	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
}
2304
/*
 * qlcnic_store_bridged_mode - sysfs store for the "bridged_mode"
 * attribute.  Accepts "0"/"1"; requires firmware bridging capability and
 * a fully initialized adapter.  Returns the consumed length or -EINVAL.
 */
static ssize_t
qlcnic_store_bridged_mode(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
	unsigned long new;
	int ret = -EINVAL;

	if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
		goto err_out;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		goto err_out;

	/* base 2 restricts the input to 0 or 1 */
	if (strict_strtoul(buf, 2, &new))
		goto err_out;

	if (!qlcnic_config_bridged_mode(adapter, !!new))
		ret = len;

err_out:
	return ret;
}
2328
2329static ssize_t
2330qlcnic_show_bridged_mode(struct device *dev,
2331 struct device_attribute *attr, char *buf)
2332{
2333 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2334 int bridged_mode = 0;
2335
2336 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2337 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
2338
2339 return sprintf(buf, "%d\n", bridged_mode);
2340}
2341
/* sysfs attribute "bridged_mode": root-writable, world-readable. */
static struct device_attribute dev_attr_bridged_mode = {
       .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
       .show = qlcnic_show_bridged_mode,
       .store = qlcnic_store_bridged_mode,
};
2347
2348static ssize_t
2349qlcnic_store_diag_mode(struct device *dev,
2350 struct device_attribute *attr, const char *buf, size_t len)
2351{
2352 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2353 unsigned long new;
2354
2355 if (strict_strtoul(buf, 2, &new))
2356 return -EINVAL;
2357
2358 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
2359 adapter->flags ^= QLCNIC_DIAG_ENABLED;
2360
2361 return len;
2362}
2363
2364static ssize_t
2365qlcnic_show_diag_mode(struct device *dev,
2366 struct device_attribute *attr, char *buf)
2367{
2368 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2369
2370 return sprintf(buf, "%d\n",
2371 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
2372}
2373
/* sysfs attribute "diag_mode": world-readable, root-writable.  Gates
 * the crb/mem binary attributes below (their validators check
 * QLCNIC_DIAG_ENABLED).
 */
static struct device_attribute dev_attr_diag_mode = {
	.attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
	.show = qlcnic_show_diag_mode,
	.store = qlcnic_store_diag_mode,
};
2379
2380static int
2381qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
2382 loff_t offset, size_t size)
2383{
2384 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2385 return -EIO;
2386
2387 if ((size != 4) || (offset & 0x3))
2388 return -EINVAL;
2389
2390 if (offset < QLCNIC_PCI_CRBSPACE)
2391 return -EINVAL;
2392
2393 return 0;
2394}
2395
2396static ssize_t
2397qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
2398 char *buf, loff_t offset, size_t size)
2399{
2400 struct device *dev = container_of(kobj, struct device, kobj);
2401 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2402 u32 data;
2403 int ret;
2404
2405 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2406 if (ret != 0)
2407 return ret;
2408
2409 data = QLCRD32(adapter, offset);
2410 memcpy(buf, &data, size);
2411 return size;
2412}
2413
2414static ssize_t
2415qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
2416 char *buf, loff_t offset, size_t size)
2417{
2418 struct device *dev = container_of(kobj, struct device, kobj);
2419 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2420 u32 data;
2421 int ret;
2422
2423 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2424 if (ret != 0)
2425 return ret;
2426
2427 memcpy(&data, buf, size);
2428 QLCWR32(adapter, offset, data);
2429 return size;
2430}
2431
2432static int
2433qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
2434 loff_t offset, size_t size)
2435{
2436 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2437 return -EIO;
2438
2439 if ((size != 8) || (offset & 0x7))
2440 return -EIO;
2441
2442 return 0;
2443}
2444
2445static ssize_t
2446qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
2447 char *buf, loff_t offset, size_t size)
2448{
2449 struct device *dev = container_of(kobj, struct device, kobj);
2450 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2451 u64 data;
2452 int ret;
2453
2454 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
2455 if (ret != 0)
2456 return ret;
2457
2458 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
2459 return -EIO;
2460
2461 memcpy(buf, &data, size);
2462
2463 return size;
2464}
2465
2466static ssize_t
2467qlcnic_sysfs_write_mem(struct kobject *kobj, struct bin_attribute *attr,
2468 char *buf, loff_t offset, size_t size)
2469{
2470 struct device *dev = container_of(kobj, struct device, kobj);
2471 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2472 u64 data;
2473 int ret;
2474
2475 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
2476 if (ret != 0)
2477 return ret;
2478
2479 memcpy(&data, buf, size);
2480
2481 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
2482 return -EIO;
2483
2484 return size;
2485}
2486
2487
/* Binary sysfs attribute "crb": 32-bit register window, valid only in
 * diag mode.  .size = 0 means no fixed size limit from sysfs.
 */
static struct bin_attribute bin_attr_crb = {
	.attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
	.size = 0,
	.read = qlcnic_sysfs_read_crb,
	.write = qlcnic_sysfs_write_crb,
};
2494
/* Binary sysfs attribute "mem": 64-bit memory window, valid only in
 * diag mode.  .size = 0 means no fixed size limit from sysfs.
 */
static struct bin_attribute bin_attr_mem = {
	.attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
	.size = 0,
	.read = qlcnic_sysfs_read_mem,
	.write = qlcnic_sysfs_write_mem,
};
2501
2502static void
2503qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
2504{
2505 struct device *dev = &adapter->pdev->dev;
2506
2507 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2508 if (device_create_file(dev, &dev_attr_bridged_mode))
2509 dev_warn(dev,
2510 "failed to create bridged_mode sysfs entry\n");
2511}
2512
2513static void
2514qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
2515{
2516 struct device *dev = &adapter->pdev->dev;
2517
2518 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2519 device_remove_file(dev, &dev_attr_bridged_mode);
2520}
2521
2522static void
2523qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
2524{
2525 struct device *dev = &adapter->pdev->dev;
2526
2527 if (device_create_file(dev, &dev_attr_diag_mode))
2528 dev_info(dev, "failed to create diag_mode sysfs entry\n");
2529 if (device_create_bin_file(dev, &bin_attr_crb))
2530 dev_info(dev, "failed to create crb sysfs entry\n");
2531 if (device_create_bin_file(dev, &bin_attr_mem))
2532 dev_info(dev, "failed to create mem sysfs entry\n");
2533}
2534
2535
/* Remove the diagnostic sysfs entries created by
 * qlcnic_create_diag_entries().
 */
static void
qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;

	device_remove_file(dev, &dev_attr_diag_mode);
	device_remove_bin_file(dev, &bin_attr_crb);
	device_remove_bin_file(dev, &bin_attr_mem);
}
2545
2546#ifdef CONFIG_INET
2547
/* True when @dev is managed by this driver (netdev_ops comparison). */
#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
2549
2550static int
2551qlcnic_destip_supported(struct qlcnic_adapter *adapter)
2552{
2553 if (adapter->ahw.cut_through)
2554 return 0;
2555
2556 return 1;
2557}
2558
/* Walk every IPv4 address configured on @dev and program (NETDEV_UP)
 * or remove (NETDEV_DOWN) it in the adapter via qlcnic_config_ipaddr().
 * No-op when destination-IP programming is unsupported or the device
 * has no in_device.
 */
static void
qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
{
	struct in_device *indev;
	struct qlcnic_adapter *adapter = netdev_priv(dev);

	if (!qlcnic_destip_supported(adapter))
		return;

	/* in_dev_get() takes a reference; released via in_dev_put(). */
	indev = in_dev_get(dev);
	if (!indev)
		return;

	for_ifa(indev) {
		switch (event) {
		case NETDEV_UP:
			qlcnic_config_ipaddr(adapter,
					ifa->ifa_address, QLCNIC_IP_UP);
			break;
		case NETDEV_DOWN:
			qlcnic_config_ipaddr(adapter,
					ifa->ifa_address, QLCNIC_IP_DOWN);
			break;
		default:
			break;
		}
	} endfor_ifa(indev);

	in_dev_put(indev);
	return;
}
2590
2591static int qlcnic_netdev_event(struct notifier_block *this,
2592 unsigned long event, void *ptr)
2593{
2594 struct qlcnic_adapter *adapter;
2595 struct net_device *dev = (struct net_device *)ptr;
2596
2597recheck:
2598 if (dev == NULL)
2599 goto done;
2600
2601 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2602 dev = vlan_dev_real_dev(dev);
2603 goto recheck;
2604 }
2605
2606 if (!is_qlcnic_netdev(dev))
2607 goto done;
2608
2609 adapter = netdev_priv(dev);
2610
2611 if (!adapter)
2612 goto done;
2613
2614 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
2615 goto done;
2616
2617 qlcnic_config_indev_addr(dev, event);
2618done:
2619 return NOTIFY_DONE;
2620}
2621
2622static int
2623qlcnic_inetaddr_event(struct notifier_block *this,
2624 unsigned long event, void *ptr)
2625{
2626 struct qlcnic_adapter *adapter;
2627 struct net_device *dev;
2628
2629 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
2630
2631 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
2632
2633recheck:
2634 if (dev == NULL || !netif_running(dev))
2635 goto done;
2636
2637 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2638 dev = vlan_dev_real_dev(dev);
2639 goto recheck;
2640 }
2641
2642 if (!is_qlcnic_netdev(dev))
2643 goto done;
2644
2645 adapter = netdev_priv(dev);
2646
2647 if (!adapter || !qlcnic_destip_supported(adapter))
2648 goto done;
2649
2650 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
2651 goto done;
2652
2653 switch (event) {
2654 case NETDEV_UP:
2655 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
2656 break;
2657 case NETDEV_DOWN:
2658 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
2659 break;
2660 default:
2661 break;
2662 }
2663
2664done:
2665 return NOTIFY_DONE;
2666}
2667
/* Notifier registered for net_device events; see qlcnic_netdev_event(). */
static struct notifier_block qlcnic_netdev_cb = {
	.notifier_call = qlcnic_netdev_event,
};

/* Notifier registered for IPv4 address add/remove events. */
static struct notifier_block qlcnic_inetaddr_cb = {
	.notifier_call = qlcnic_inetaddr_event,
};
2675#else
/* CONFIG_INET disabled: IPv4 address programming is a no-op. */
static void
qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
{ }
2679#endif
2680
/* PCI driver glue: probe/remove, optional PM hooks, and shutdown. */
static struct pci_driver qlcnic_driver = {
	.name = qlcnic_driver_name,
	.id_table = qlcnic_pci_tbl,
	.probe = qlcnic_probe,
	.remove = __devexit_p(qlcnic_remove),
#ifdef CONFIG_PM
	.suspend = qlcnic_suspend,
	.resume = qlcnic_resume,
#endif
	.shutdown = qlcnic_shutdown
};
2692
2693static int __init qlcnic_init_module(void)
2694{
2695
2696 printk(KERN_INFO "%s\n", qlcnic_driver_string);
2697
2698#ifdef CONFIG_INET
2699 register_netdevice_notifier(&qlcnic_netdev_cb);
2700 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
2701#endif
2702
2703
2704 return pci_register_driver(&qlcnic_driver);
2705}
2706
2707module_init(qlcnic_init_module);
2708
/* Module unload: unregister the PCI driver first, then drop the
 * notifiers (reverse of the registration order in init).
 */
static void __exit qlcnic_exit_module(void)
{

	pci_unregister_driver(&qlcnic_driver);

#ifdef CONFIG_INET
	unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
	unregister_netdevice_notifier(&qlcnic_netdev_cb);
#endif
}

module_exit(qlcnic_exit_module);
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 862c1aaf3860..8b742b639ceb 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -19,14 +19,6 @@
19#define DRV_VERSION "v1.00.00.23.00.00-01" 19#define DRV_VERSION "v1.00.00.23.00.00-01"
20 20
21#define PFX "qlge: " 21#define PFX "qlge: "
22#define QPRINTK(qdev, nlevel, klevel, fmt, args...) \
23 do { \
24 if (!((qdev)->msg_enable & NETIF_MSG_##nlevel)) \
25 ; \
26 else \
27 dev_printk(KERN_##klevel, &((qdev)->pdev->dev), \
28 "%s: " fmt, __func__, ##args); \
29 } while (0)
30 22
31#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ 23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
32 24
@@ -54,12 +46,8 @@
54#define RX_RING_SHADOW_SPACE (sizeof(u64) + \ 46#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
55 MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \ 47 MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
56 MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64)) 48 MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
57#define SMALL_BUFFER_SIZE 512
58#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
59#define LARGE_BUFFER_MAX_SIZE 8192 49#define LARGE_BUFFER_MAX_SIZE 8192
60#define LARGE_BUFFER_MIN_SIZE 2048 50#define LARGE_BUFFER_MIN_SIZE 2048
61#define MAX_SPLIT_SIZE 1023
62#define QLGE_SB_PAD 32
63 51
64#define MAX_CQ 128 52#define MAX_CQ 128
65#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */ 53#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
@@ -79,15 +67,43 @@
79#define TX_DESC_PER_OAL 0 67#define TX_DESC_PER_OAL 0
80#endif 68#endif
81 69
70/* Word shifting for converting 64-bit
71 * address to a series of 16-bit words.
72 * This is used for some MPI firmware
73 * mailbox commands.
74 */
75#define LSW(x) ((u16)(x))
76#define MSW(x) ((u16)((u32)(x) >> 16))
77#define LSD(x) ((u32)((u64)(x)))
78#define MSD(x) ((u32)((((u64)(x)) >> 32)))
79
82/* MPI test register definitions. This register 80/* MPI test register definitions. This register
83 * is used for determining alternate NIC function's 81 * is used for determining alternate NIC function's
84 * PCI->func number. 82 * PCI->func number.
85 */ 83 */
86enum { 84enum {
87 MPI_TEST_FUNC_PORT_CFG = 0x1002, 85 MPI_TEST_FUNC_PORT_CFG = 0x1002,
86 MPI_TEST_FUNC_PRB_CTL = 0x100e,
87 MPI_TEST_FUNC_PRB_EN = 0x18a20000,
88 MPI_TEST_FUNC_RST_STS = 0x100a,
89 MPI_TEST_FUNC_RST_FRC = 0x00000003,
90 MPI_TEST_NIC_FUNC_MASK = 0x00000007,
91 MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0),
92 MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e,
88 MPI_TEST_NIC1_FUNC_SHIFT = 1, 93 MPI_TEST_NIC1_FUNC_SHIFT = 1,
94 MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4),
95 MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0,
89 MPI_TEST_NIC2_FUNC_SHIFT = 5, 96 MPI_TEST_NIC2_FUNC_SHIFT = 5,
90 MPI_TEST_NIC_FUNC_MASK = 0x00000007, 97 MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8),
98 MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00,
99 MPI_TEST_FC1_FUNCTION_SHIFT = 9,
100 MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12),
101 MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000,
102 MPI_TEST_FC2_FUNCTION_SHIFT = 13,
103
104 MPI_NIC_READ = 0x00000000,
105 MPI_NIC_REG_BLOCK = 0x00020000,
106 MPI_NIC_FUNCTION_SHIFT = 6,
91}; 107};
92 108
93/* 109/*
@@ -468,7 +484,7 @@ enum {
468 MDIO_PORT = 0x00000440, 484 MDIO_PORT = 0x00000440,
469 MDIO_STATUS = 0x00000450, 485 MDIO_STATUS = 0x00000450,
470 486
471 /* XGMAC AUX statistics registers */ 487 XGMAC_REGISTER_END = 0x00000740,
472}; 488};
473 489
474/* 490/*
@@ -509,6 +525,7 @@ enum {
509enum { 525enum {
510 MAC_ADDR_IDX_SHIFT = 4, 526 MAC_ADDR_IDX_SHIFT = 4,
511 MAC_ADDR_TYPE_SHIFT = 16, 527 MAC_ADDR_TYPE_SHIFT = 16,
528 MAC_ADDR_TYPE_COUNT = 10,
512 MAC_ADDR_TYPE_MASK = 0x000f0000, 529 MAC_ADDR_TYPE_MASK = 0x000f0000,
513 MAC_ADDR_TYPE_CAM_MAC = 0x00000000, 530 MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
514 MAC_ADDR_TYPE_MULTI_MAC = 0x00010000, 531 MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
@@ -526,6 +543,30 @@ enum {
526 MAC_ADDR_MR = (1 << 30), 543 MAC_ADDR_MR = (1 << 30),
527 MAC_ADDR_MW = (1 << 31), 544 MAC_ADDR_MW = (1 << 31),
528 MAX_MULTICAST_ENTRIES = 32, 545 MAX_MULTICAST_ENTRIES = 32,
546
547 /* Entry count and words per entry
548 * for each address type in the filter.
549 */
550 MAC_ADDR_MAX_CAM_ENTRIES = 512,
551 MAC_ADDR_MAX_CAM_WCOUNT = 3,
552 MAC_ADDR_MAX_MULTICAST_ENTRIES = 32,
553 MAC_ADDR_MAX_MULTICAST_WCOUNT = 2,
554 MAC_ADDR_MAX_VLAN_ENTRIES = 4096,
555 MAC_ADDR_MAX_VLAN_WCOUNT = 1,
556 MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096,
557 MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1,
558 MAC_ADDR_MAX_FC_MAC_ENTRIES = 4,
559 MAC_ADDR_MAX_FC_MAC_WCOUNT = 2,
560 MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8,
561 MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2,
562 MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16,
563 MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1,
564 MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4,
565 MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1,
566 MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4,
567 MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4,
568 MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4,
569 MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1,
529}; 570};
530 571
531/* 572/*
@@ -596,6 +637,7 @@ enum {
596enum { 637enum {
597 RT_IDX_IDX_SHIFT = 8, 638 RT_IDX_IDX_SHIFT = 8,
598 RT_IDX_TYPE_MASK = 0x000f0000, 639 RT_IDX_TYPE_MASK = 0x000f0000,
640 RT_IDX_TYPE_SHIFT = 16,
599 RT_IDX_TYPE_RT = 0x00000000, 641 RT_IDX_TYPE_RT = 0x00000000,
600 RT_IDX_TYPE_RT_INV = 0x00010000, 642 RT_IDX_TYPE_RT_INV = 0x00010000,
601 RT_IDX_TYPE_NICQ = 0x00020000, 643 RT_IDX_TYPE_NICQ = 0x00020000,
@@ -664,7 +706,89 @@ enum {
664 RT_IDX_UNUSED013 = 13, 706 RT_IDX_UNUSED013 = 13,
665 RT_IDX_UNUSED014 = 14, 707 RT_IDX_UNUSED014 = 14,
666 RT_IDX_PROMISCUOUS_SLOT = 15, 708 RT_IDX_PROMISCUOUS_SLOT = 15,
667 RT_IDX_MAX_SLOTS = 16, 709 RT_IDX_MAX_RT_SLOTS = 8,
710 RT_IDX_MAX_NIC_SLOTS = 16,
711};
712
713/*
714 * Serdes Address Register (XG_SERDES_ADDR) bit definitions.
715 */
716enum {
717 XG_SERDES_ADDR_RDY = (1 << 31),
718 XG_SERDES_ADDR_R = (1 << 30),
719
720 XG_SERDES_ADDR_STS = 0x00001E06,
721 XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005,
722 XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a,
723 XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001,
724
725 /* Serdes coredump definitions. */
726 XG_SERDES_XAUI_AN_START = 0x00000000,
727 XG_SERDES_XAUI_AN_END = 0x00000034,
728 XG_SERDES_XAUI_HSS_PCS_START = 0x00000800,
729 XG_SERDES_XAUI_HSS_PCS_END = 0x0000880,
730 XG_SERDES_XFI_AN_START = 0x00001000,
731 XG_SERDES_XFI_AN_END = 0x00001034,
732 XG_SERDES_XFI_TRAIN_START = 0x10001050,
733 XG_SERDES_XFI_TRAIN_END = 0x1000107C,
734 XG_SERDES_XFI_HSS_PCS_START = 0x00001800,
735 XG_SERDES_XFI_HSS_PCS_END = 0x00001838,
736 XG_SERDES_XFI_HSS_TX_START = 0x00001c00,
737 XG_SERDES_XFI_HSS_TX_END = 0x00001c1f,
738 XG_SERDES_XFI_HSS_RX_START = 0x00001c40,
739 XG_SERDES_XFI_HSS_RX_END = 0x00001c5f,
740 XG_SERDES_XFI_HSS_PLL_START = 0x00001e00,
741 XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f,
742};
743
744/*
745 * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions.
746 */
747enum {
748 PRB_MX_ADDR_ARE = (1 << 16),
749 PRB_MX_ADDR_UP = (1 << 15),
750 PRB_MX_ADDR_SWP = (1 << 14),
751
752 /* Module select values. */
753 PRB_MX_ADDR_MAX_MODS = 21,
754 PRB_MX_ADDR_MOD_SEL_SHIFT = 9,
755 PRB_MX_ADDR_MOD_SEL_TBD = 0,
756 PRB_MX_ADDR_MOD_SEL_IDE1 = 1,
757 PRB_MX_ADDR_MOD_SEL_IDE2 = 2,
758 PRB_MX_ADDR_MOD_SEL_FRB = 3,
759 PRB_MX_ADDR_MOD_SEL_ODE1 = 4,
760 PRB_MX_ADDR_MOD_SEL_ODE2 = 5,
761 PRB_MX_ADDR_MOD_SEL_DA1 = 6,
762 PRB_MX_ADDR_MOD_SEL_DA2 = 7,
763 PRB_MX_ADDR_MOD_SEL_IMP1 = 8,
764 PRB_MX_ADDR_MOD_SEL_IMP2 = 9,
765 PRB_MX_ADDR_MOD_SEL_OMP1 = 10,
766 PRB_MX_ADDR_MOD_SEL_OMP2 = 11,
767 PRB_MX_ADDR_MOD_SEL_ORS1 = 12,
768 PRB_MX_ADDR_MOD_SEL_ORS2 = 13,
769 PRB_MX_ADDR_MOD_SEL_REG = 14,
770 PRB_MX_ADDR_MOD_SEL_MAC1 = 16,
771 PRB_MX_ADDR_MOD_SEL_MAC2 = 17,
772 PRB_MX_ADDR_MOD_SEL_VQM1 = 18,
773 PRB_MX_ADDR_MOD_SEL_VQM2 = 19,
774 PRB_MX_ADDR_MOD_SEL_MOP = 20,
775 /* Bit fields indicating which modules
776 * are valid for each clock domain.
777 */
778 PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7,
779 PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1,
780 PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309,
781 PRB_MX_ADDR_VALID_FC_MOD = 0x00003001,
782 PRB_MX_ADDR_VALID_TOTAL = 34,
783
784 /* Clock domain values. */
785 PRB_MX_ADDR_CLOCK_SHIFT = 6,
786 PRB_MX_ADDR_SYS_CLOCK = 0,
787 PRB_MX_ADDR_PCI_CLOCK = 2,
788 PRB_MX_ADDR_FC_CLOCK = 5,
789 PRB_MX_ADDR_XGM_CLOCK = 6,
790
791 PRB_MX_ADDR_MAX_MUX = 64,
668}; 792};
669 793
670/* 794/*
@@ -737,6 +861,21 @@ enum {
737 PRB_MX_DATA = 0xfc, /* Use semaphore */ 861 PRB_MX_DATA = 0xfc, /* Use semaphore */
738}; 862};
739 863
864#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
865#define SMALL_BUFFER_SIZE 256
866#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE
867#define SPLT_SETTING FSC_DBRST_1024
868#define SPLT_LEN 0
869#define QLGE_SB_PAD 0
870#else
871#define SMALL_BUFFER_SIZE 512
872#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
873#define SPLT_SETTING FSC_SH
874#define SPLT_LEN (SPLT_HDR_EP | \
875 min(SMALL_BUF_MAP_SIZE, 1023))
876#define QLGE_SB_PAD 32
877#endif
878
740/* 879/*
741 * CAM output format. 880 * CAM output format.
742 */ 881 */
@@ -1421,7 +1560,7 @@ struct nic_stats {
1421 u64 rx_nic_fifo_drop; 1560 u64 rx_nic_fifo_drop;
1422}; 1561};
1423 1562
1424/* Address/Length pairs for the coredump. */ 1563/* Firmware coredump internal register address/length pairs. */
1425enum { 1564enum {
1426 MPI_CORE_REGS_ADDR = 0x00030000, 1565 MPI_CORE_REGS_ADDR = 0x00030000,
1427 MPI_CORE_REGS_CNT = 127, 1566 MPI_CORE_REGS_CNT = 127,
@@ -1476,7 +1615,7 @@ struct mpi_coredump_segment_header {
1476 u8 description[16]; 1615 u8 description[16];
1477}; 1616};
1478 1617
1479/* Reg dump segment numbers. */ 1618/* Firmware coredump header segment numbers. */
1480enum { 1619enum {
1481 CORE_SEG_NUM = 1, 1620 CORE_SEG_NUM = 1,
1482 TEST_LOGIC_SEG_NUM = 2, 1621 TEST_LOGIC_SEG_NUM = 2,
@@ -1527,6 +1666,67 @@ enum {
1527 1666
1528}; 1667};
1529 1668
1669/* There are 64 generic NIC registers. */
1670#define NIC_REGS_DUMP_WORD_COUNT 64
1671/* XGMAC word count. */
1672#define XGMAC_DUMP_WORD_COUNT (XGMAC_REGISTER_END / 4)
1673/* Word counts for the SERDES blocks. */
1674#define XG_SERDES_XAUI_AN_COUNT 14
1675#define XG_SERDES_XAUI_HSS_PCS_COUNT 33
1676#define XG_SERDES_XFI_AN_COUNT 14
1677#define XG_SERDES_XFI_TRAIN_COUNT 12
1678#define XG_SERDES_XFI_HSS_PCS_COUNT 15
1679#define XG_SERDES_XFI_HSS_TX_COUNT 32
1680#define XG_SERDES_XFI_HSS_RX_COUNT 32
1681#define XG_SERDES_XFI_HSS_PLL_COUNT 32
1682
1683/* There are 2 CNA ETS and 8 NIC ETS registers. */
1684#define ETS_REGS_DUMP_WORD_COUNT 10
1685
1686/* Each probe mux entry stores the probe type plus 64 entries
 1687 * that are each 64 bits in length. There are a total of
1688 * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes.
1689 */
1690#define PRB_MX_ADDR_PRB_WORD_COUNT (1 + (PRB_MX_ADDR_MAX_MUX * 2))
1691#define PRB_MX_DUMP_TOT_COUNT (PRB_MX_ADDR_PRB_WORD_COUNT * \
1692 PRB_MX_ADDR_VALID_TOTAL)
1693/* Each routing entry consists of 4 32-bit words.
1694 * They are route type, index, index word, and result.
1695 * There are 2 route blocks with 8 entries each and
1696 * 2 NIC blocks with 16 entries each.
 1697 * The total number of entries is 48, with 4 words each.
1698 */
1699#define RT_IDX_DUMP_ENTRIES 48
1700#define RT_IDX_DUMP_WORDS_PER_ENTRY 4
1701#define RT_IDX_DUMP_TOT_WORDS (RT_IDX_DUMP_ENTRIES * \
1702 RT_IDX_DUMP_WORDS_PER_ENTRY)
1703/* There are 10 address blocks in filter, each with
1704 * different entry counts and different word-count-per-entry.
1705 */
1706#define MAC_ADDR_DUMP_ENTRIES \
1707 ((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \
1708 (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \
1709 (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \
1710 (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \
1711 (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \
1712 (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \
1713 (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \
1714 (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \
1715 (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \
1716 (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT))
1717#define MAC_ADDR_DUMP_WORDS_PER_ENTRY 2
1718#define MAC_ADDR_DUMP_TOT_WORDS (MAC_ADDR_DUMP_ENTRIES * \
1719 MAC_ADDR_DUMP_WORDS_PER_ENTRY)
 1720/* Maximum of 4 functions whose semaphore registers are
1721 * in the coredump.
1722 */
1723#define MAX_SEMAPHORE_FUNCTIONS 4
 1724/* Defines for accessing the MPI shadow registers. */
1725#define RISC_124 0x0003007c
1726#define RISC_127 0x0003007f
1727#define SHADOW_OFFSET 0xb0000000
1728#define SHADOW_REG_SHIFT 20
1729
1530struct ql_nic_misc { 1730struct ql_nic_misc {
1531 u32 rx_ring_count; 1731 u32 rx_ring_count;
1532 u32 tx_ring_count; 1732 u32 tx_ring_count;
@@ -1568,6 +1768,199 @@ struct ql_reg_dump {
1568 u32 ets[8+2]; 1768 u32 ets[8+2];
1569}; 1769};
1570 1770
1771struct ql_mpi_coredump {
1772 /* segment 0 */
1773 struct mpi_coredump_global_header mpi_global_header;
1774
1775 /* segment 1 */
1776 struct mpi_coredump_segment_header core_regs_seg_hdr;
1777 u32 mpi_core_regs[MPI_CORE_REGS_CNT];
1778 u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT];
1779
1780 /* segment 2 */
1781 struct mpi_coredump_segment_header test_logic_regs_seg_hdr;
1782 u32 test_logic_regs[TEST_REGS_CNT];
1783
1784 /* segment 3 */
1785 struct mpi_coredump_segment_header rmii_regs_seg_hdr;
1786 u32 rmii_regs[RMII_REGS_CNT];
1787
1788 /* segment 4 */
1789 struct mpi_coredump_segment_header fcmac1_regs_seg_hdr;
1790 u32 fcmac1_regs[FCMAC_REGS_CNT];
1791
1792 /* segment 5 */
1793 struct mpi_coredump_segment_header fcmac2_regs_seg_hdr;
1794 u32 fcmac2_regs[FCMAC_REGS_CNT];
1795
1796 /* segment 6 */
1797 struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr;
1798 u32 fc1_mbx_regs[FC_MBX_REGS_CNT];
1799
1800 /* segment 7 */
1801 struct mpi_coredump_segment_header ide_regs_seg_hdr;
1802 u32 ide_regs[IDE_REGS_CNT];
1803
1804 /* segment 8 */
1805 struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr;
1806 u32 nic1_mbx_regs[NIC_MBX_REGS_CNT];
1807
1808 /* segment 9 */
1809 struct mpi_coredump_segment_header smbus_regs_seg_hdr;
1810 u32 smbus_regs[SMBUS_REGS_CNT];
1811
1812 /* segment 10 */
1813 struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr;
1814 u32 fc2_mbx_regs[FC_MBX_REGS_CNT];
1815
1816 /* segment 11 */
1817 struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr;
1818 u32 nic2_mbx_regs[NIC_MBX_REGS_CNT];
1819
1820 /* segment 12 */
1821 struct mpi_coredump_segment_header i2c_regs_seg_hdr;
1822 u32 i2c_regs[I2C_REGS_CNT];
1823 /* segment 13 */
1824 struct mpi_coredump_segment_header memc_regs_seg_hdr;
1825 u32 memc_regs[MEMC_REGS_CNT];
1826
1827 /* segment 14 */
1828 struct mpi_coredump_segment_header pbus_regs_seg_hdr;
1829 u32 pbus_regs[PBUS_REGS_CNT];
1830
1831 /* segment 15 */
1832 struct mpi_coredump_segment_header mde_regs_seg_hdr;
1833 u32 mde_regs[MDE_REGS_CNT];
1834
1835 /* segment 16 */
1836 struct mpi_coredump_segment_header nic_regs_seg_hdr;
1837 u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT];
1838
1839 /* segment 17 */
1840 struct mpi_coredump_segment_header nic2_regs_seg_hdr;
1841 u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT];
1842
1843 /* segment 18 */
1844 struct mpi_coredump_segment_header xgmac1_seg_hdr;
1845 u32 xgmac1[XGMAC_DUMP_WORD_COUNT];
1846
1847 /* segment 19 */
1848 struct mpi_coredump_segment_header xgmac2_seg_hdr;
1849 u32 xgmac2[XGMAC_DUMP_WORD_COUNT];
1850
1851 /* segment 20 */
1852 struct mpi_coredump_segment_header code_ram_seg_hdr;
1853 u32 code_ram[CODE_RAM_CNT];
1854
1855 /* segment 21 */
1856 struct mpi_coredump_segment_header memc_ram_seg_hdr;
1857 u32 memc_ram[MEMC_RAM_CNT];
1858
1859 /* segment 22 */
1860 struct mpi_coredump_segment_header xaui_an_hdr;
1861 u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT];
1862
1863 /* segment 23 */
1864 struct mpi_coredump_segment_header xaui_hss_pcs_hdr;
1865 u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
1866
1867 /* segment 24 */
1868 struct mpi_coredump_segment_header xfi_an_hdr;
1869 u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT];
1870
1871 /* segment 25 */
1872 struct mpi_coredump_segment_header xfi_train_hdr;
1873 u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
1874
1875 /* segment 26 */
1876 struct mpi_coredump_segment_header xfi_hss_pcs_hdr;
1877 u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
1878
1879 /* segment 27 */
1880 struct mpi_coredump_segment_header xfi_hss_tx_hdr;
1881 u32 serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
1882
1883 /* segment 28 */
1884 struct mpi_coredump_segment_header xfi_hss_rx_hdr;
1885 u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
1886
1887 /* segment 29 */
1888 struct mpi_coredump_segment_header xfi_hss_pll_hdr;
1889 u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
1890
1891 /* segment 30 */
1892 struct mpi_coredump_segment_header misc_nic_seg_hdr;
1893 struct ql_nic_misc misc_nic_info;
1894
1895 /* segment 31 */
1896 /* one interrupt state for each CQ */
1897 struct mpi_coredump_segment_header intr_states_seg_hdr;
1898 u32 intr_states[MAX_RX_RINGS];
1899
1900 /* segment 32 */
1901 /* 3 cam words each for 16 unicast,
1902 * 2 cam words for each of 32 multicast.
1903 */
1904 struct mpi_coredump_segment_header cam_entries_seg_hdr;
1905 u32 cam_entries[(16 * 3) + (32 * 3)];
1906
1907 /* segment 33 */
1908 struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
1909 u32 nic_routing_words[16];
1910 /* segment 34 */
1911 struct mpi_coredump_segment_header ets_seg_hdr;
1912 u32 ets[ETS_REGS_DUMP_WORD_COUNT];
1913
1914 /* segment 35 */
1915 struct mpi_coredump_segment_header probe_dump_seg_hdr;
1916 u32 probe_dump[PRB_MX_DUMP_TOT_COUNT];
1917
1918 /* segment 36 */
1919 struct mpi_coredump_segment_header routing_reg_seg_hdr;
1920 u32 routing_regs[RT_IDX_DUMP_TOT_WORDS];
1921
1922 /* segment 37 */
1923 struct mpi_coredump_segment_header mac_prot_reg_seg_hdr;
1924 u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS];
1925
1926 /* segment 38 */
1927 struct mpi_coredump_segment_header xaui2_an_hdr;
1928 u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT];
1929
1930 /* segment 39 */
1931 struct mpi_coredump_segment_header xaui2_hss_pcs_hdr;
1932 u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
1933
1934 /* segment 40 */
1935 struct mpi_coredump_segment_header xfi2_an_hdr;
1936 u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT];
1937
1938 /* segment 41 */
1939 struct mpi_coredump_segment_header xfi2_train_hdr;
1940 u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
1941
1942 /* segment 42 */
1943 struct mpi_coredump_segment_header xfi2_hss_pcs_hdr;
1944 u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
1945
1946 /* segment 43 */
1947 struct mpi_coredump_segment_header xfi2_hss_tx_hdr;
1948 u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
1949
1950 /* segment 44 */
1951 struct mpi_coredump_segment_header xfi2_hss_rx_hdr;
1952 u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
1953
1954 /* segment 45 */
1955 struct mpi_coredump_segment_header xfi2_hss_pll_hdr;
1956 u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
1957
1958 /* segment 50 */
1959 /* semaphore register for all 5 functions */
1960 struct mpi_coredump_segment_header sem_regs_seg_hdr;
1961 u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS];
1962};
1963
1571/* 1964/*
1572 * intr_context structure is used during initialization 1965 * intr_context structure is used during initialization
1573 * to hook the interrupts. It is also used in a single 1966 * to hook the interrupts. It is also used in a single
@@ -1603,6 +1996,8 @@ enum {
1603 QL_CAM_RT_SET = 8, 1996 QL_CAM_RT_SET = 8,
1604 QL_SELFTEST = 9, 1997 QL_SELFTEST = 9,
1605 QL_LB_LINK_UP = 10, 1998 QL_LB_LINK_UP = 10,
1999 QL_FRC_COREDUMP = 11,
2000 QL_EEH_FATAL = 12,
1606}; 2001};
1607 2002
1608/* link_status bit definitions */ 2003/* link_status bit definitions */
@@ -1724,6 +2119,8 @@ struct ql_adapter {
1724 u32 port_link_up; 2119 u32 port_link_up;
1725 u32 port_init; 2120 u32 port_init;
1726 u32 link_status; 2121 u32 link_status;
2122 struct ql_mpi_coredump *mpi_coredump;
2123 u32 core_is_dumped;
1727 u32 link_config; 2124 u32 link_config;
1728 u32 led_config; 2125 u32 led_config;
1729 u32 max_frame_size; 2126 u32 max_frame_size;
@@ -1736,10 +2133,14 @@ struct ql_adapter {
1736 struct delayed_work mpi_work; 2133 struct delayed_work mpi_work;
1737 struct delayed_work mpi_port_cfg_work; 2134 struct delayed_work mpi_port_cfg_work;
1738 struct delayed_work mpi_idc_work; 2135 struct delayed_work mpi_idc_work;
2136 struct delayed_work mpi_core_to_log;
1739 struct completion ide_completion; 2137 struct completion ide_completion;
1740 struct nic_operations *nic_ops; 2138 struct nic_operations *nic_ops;
1741 u16 device_id; 2139 u16 device_id;
2140 struct timer_list timer;
1742 atomic_t lb_count; 2141 atomic_t lb_count;
2142 /* Keep local copy of current mac address. */
2143 char current_mac_addr[6];
1743}; 2144};
1744 2145
1745/* 2146/*
@@ -1807,6 +2208,7 @@ extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
1807void ql_queue_fw_error(struct ql_adapter *qdev); 2208void ql_queue_fw_error(struct ql_adapter *qdev);
1808void ql_mpi_work(struct work_struct *work); 2209void ql_mpi_work(struct work_struct *work);
1809void ql_mpi_reset_work(struct work_struct *work); 2210void ql_mpi_reset_work(struct work_struct *work);
2211void ql_mpi_core_to_log(struct work_struct *work);
1810int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit); 2212int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
1811void ql_queue_asic_error(struct ql_adapter *qdev); 2213void ql_queue_asic_error(struct ql_adapter *qdev);
1812u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr); 2214u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
@@ -1817,6 +2219,15 @@ void ql_mpi_port_cfg_work(struct work_struct *work);
1817int ql_mb_get_fw_state(struct ql_adapter *qdev); 2219int ql_mb_get_fw_state(struct ql_adapter *qdev);
1818int ql_cam_route_initialize(struct ql_adapter *qdev); 2220int ql_cam_route_initialize(struct ql_adapter *qdev);
1819int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data); 2221int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
2222int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
2223int ql_unpause_mpi_risc(struct ql_adapter *qdev);
2224int ql_pause_mpi_risc(struct ql_adapter *qdev);
2225int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
2226int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
2227 u32 ram_addr, int word_count);
2228int ql_core_dump(struct ql_adapter *qdev,
2229 struct ql_mpi_coredump *mpi_coredump);
2230int ql_mb_sys_err(struct ql_adapter *qdev);
1820int ql_mb_about_fw(struct ql_adapter *qdev); 2231int ql_mb_about_fw(struct ql_adapter *qdev);
1821int ql_wol(struct ql_adapter *qdev); 2232int ql_wol(struct ql_adapter *qdev);
1822int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol); 2233int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
@@ -1833,6 +2244,7 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
1833 struct ql_reg_dump *mpi_coredump); 2244 struct ql_reg_dump *mpi_coredump);
1834netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev); 2245netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
1835void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *); 2246void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
2247int ql_own_firmware(struct ql_adapter *qdev);
1836int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget); 2248int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
1837 2249
1838#if 1 2250#if 1
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 9f58c4710761..ff8550d2ca82 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1,5 +1,405 @@
1#include "qlge.h" 1#include "qlge.h"
2 2
3/* Read a NIC register from the alternate function. */
4static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
5 u32 reg)
6{
7 u32 register_to_read;
8 u32 reg_val;
9 unsigned int status = 0;
10
11 register_to_read = MPI_NIC_REG_BLOCK
12 | MPI_NIC_READ
13 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
14 | reg;
15 status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
16 if (status != 0)
17 return 0xffffffff;
18
19 return reg_val;
20}
21
22/* Write a NIC register from the alternate function. */
23static int ql_write_other_func_reg(struct ql_adapter *qdev,
24 u32 reg, u32 reg_val)
25{
26 u32 register_to_read;
27 int status = 0;
28
29 register_to_read = MPI_NIC_REG_BLOCK
30 | MPI_NIC_READ
31 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
32 | reg;
33 status = ql_write_mpi_reg(qdev, register_to_read, reg_val);
34
35 return status;
36}
37
38static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
39 u32 bit, u32 err_bit)
40{
41 u32 temp;
42 int count = 10;
43
44 while (count) {
45 temp = ql_read_other_func_reg(qdev, reg);
46
47 /* check for errors */
48 if (temp & err_bit)
49 return -1;
50 else if (temp & bit)
51 return 0;
52 mdelay(10);
53 count--;
54 }
55 return -1;
56}
57
58static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
59 u32 *data)
60{
61 int status;
62
63 /* wait for reg to come ready */
64 status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
65 XG_SERDES_ADDR_RDY, 0);
66 if (status)
67 goto exit;
68
69 /* set up for reg read */
70 ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R);
71
72 /* wait for reg to come ready */
73 status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
74 XG_SERDES_ADDR_RDY, 0);
75 if (status)
76 goto exit;
77
78 /* get the data */
79 *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
80exit:
81 return status;
82}
83
84/* Read out the SERDES registers */
85static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 * data)
86{
87 int status;
88
89 /* wait for reg to come ready */
90 status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
91 if (status)
92 goto exit;
93
94 /* set up for reg read */
95 ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
96
97 /* wait for reg to come ready */
98 status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
99 if (status)
100 goto exit;
101
102 /* get the data */
103 *data = ql_read32(qdev, XG_SERDES_DATA);
104exit:
105 return status;
106}
107
108static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
109 u32 *direct_ptr, u32 *indirect_ptr,
110 unsigned int direct_valid, unsigned int indirect_valid)
111{
112 unsigned int status;
113
114 status = 1;
115 if (direct_valid)
116 status = ql_read_serdes_reg(qdev, addr, direct_ptr);
117 /* Dead fill any failures or invalids. */
118 if (status)
119 *direct_ptr = 0xDEADBEEF;
120
121 status = 1;
122 if (indirect_valid)
123 status = ql_read_other_func_serdes_reg(
124 qdev, addr, indirect_ptr);
125 /* Dead fill any failures or invalids. */
126 if (status)
127 *indirect_ptr = 0xDEADBEEF;
128}
129
130static int ql_get_serdes_regs(struct ql_adapter *qdev,
131 struct ql_mpi_coredump *mpi_coredump)
132{
133 int status;
134 unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
135 unsigned int xaui_indirect_valid, i;
136 u32 *direct_ptr, temp;
137 u32 *indirect_ptr;
138
139 xfi_direct_valid = xfi_indirect_valid = 0;
140 xaui_direct_valid = xaui_indirect_valid = 1;
141
142 /* The XAUI needs to be read out per port */
143 if (qdev->func & 1) {
144 /* We are NIC 2 */
145 status = ql_read_other_func_serdes_reg(qdev,
146 XG_SERDES_XAUI_HSS_PCS_START, &temp);
147 if (status)
148 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
149 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
150 XG_SERDES_ADDR_XAUI_PWR_DOWN)
151 xaui_indirect_valid = 0;
152
153 status = ql_read_serdes_reg(qdev,
154 XG_SERDES_XAUI_HSS_PCS_START, &temp);
155 if (status)
156 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
157
158 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
159 XG_SERDES_ADDR_XAUI_PWR_DOWN)
160 xaui_direct_valid = 0;
161 } else {
162 /* We are NIC 1 */
163 status = ql_read_other_func_serdes_reg(qdev,
164 XG_SERDES_XAUI_HSS_PCS_START, &temp);
165 if (status)
166 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
167 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
168 XG_SERDES_ADDR_XAUI_PWR_DOWN)
169 xaui_indirect_valid = 0;
170
171 status = ql_read_serdes_reg(qdev,
172 XG_SERDES_XAUI_HSS_PCS_START, &temp);
173 if (status)
174 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
175 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
176 XG_SERDES_ADDR_XAUI_PWR_DOWN)
177 xaui_direct_valid = 0;
178 }
179
180 /*
181 * XFI register is shared so only need to read one
182 * functions and then check the bits.
183 */
184 status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
185 if (status)
186 temp = 0;
187
188 if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
189 XG_SERDES_ADDR_XFI1_PWR_UP) {
190 /* now see if i'm NIC 1 or NIC 2 */
191 if (qdev->func & 1)
192 /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
193 xfi_indirect_valid = 1;
194 else
195 xfi_direct_valid = 1;
196 }
197 if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
198 XG_SERDES_ADDR_XFI2_PWR_UP) {
199 /* now see if i'm NIC 1 or NIC 2 */
200 if (qdev->func & 1)
201 /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
202 xfi_direct_valid = 1;
203 else
204 xfi_indirect_valid = 1;
205 }
206
207 /* Get XAUI_AN register block. */
208 if (qdev->func & 1) {
209 /* Function 2 is direct */
210 direct_ptr = mpi_coredump->serdes2_xaui_an;
211 indirect_ptr = mpi_coredump->serdes_xaui_an;
212 } else {
213 /* Function 1 is direct */
214 direct_ptr = mpi_coredump->serdes_xaui_an;
215 indirect_ptr = mpi_coredump->serdes2_xaui_an;
216 }
217
218 for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
219 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
220 xaui_direct_valid, xaui_indirect_valid);
221
222 /* Get XAUI_HSS_PCS register block. */
223 if (qdev->func & 1) {
224 direct_ptr =
225 mpi_coredump->serdes2_xaui_hss_pcs;
226 indirect_ptr =
227 mpi_coredump->serdes_xaui_hss_pcs;
228 } else {
229 direct_ptr =
230 mpi_coredump->serdes_xaui_hss_pcs;
231 indirect_ptr =
232 mpi_coredump->serdes2_xaui_hss_pcs;
233 }
234
235 for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
236 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
237 xaui_direct_valid, xaui_indirect_valid);
238
239 /* Get XAUI_XFI_AN register block. */
240 if (qdev->func & 1) {
241 direct_ptr = mpi_coredump->serdes2_xfi_an;
242 indirect_ptr = mpi_coredump->serdes_xfi_an;
243 } else {
244 direct_ptr = mpi_coredump->serdes_xfi_an;
245 indirect_ptr = mpi_coredump->serdes2_xfi_an;
246 }
247
248 for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
249 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
250 xfi_direct_valid, xfi_indirect_valid);
251
252 /* Get XAUI_XFI_TRAIN register block. */
253 if (qdev->func & 1) {
254 direct_ptr = mpi_coredump->serdes2_xfi_train;
255 indirect_ptr =
256 mpi_coredump->serdes_xfi_train;
257 } else {
258 direct_ptr = mpi_coredump->serdes_xfi_train;
259 indirect_ptr =
260 mpi_coredump->serdes2_xfi_train;
261 }
262
263 for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
264 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
265 xfi_direct_valid, xfi_indirect_valid);
266
267 /* Get XAUI_XFI_HSS_PCS register block. */
268 if (qdev->func & 1) {
269 direct_ptr =
270 mpi_coredump->serdes2_xfi_hss_pcs;
271 indirect_ptr =
272 mpi_coredump->serdes_xfi_hss_pcs;
273 } else {
274 direct_ptr =
275 mpi_coredump->serdes_xfi_hss_pcs;
276 indirect_ptr =
277 mpi_coredump->serdes2_xfi_hss_pcs;
278 }
279
280 for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
281 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
282 xfi_direct_valid, xfi_indirect_valid);
283
284 /* Get XAUI_XFI_HSS_TX register block. */
285 if (qdev->func & 1) {
286 direct_ptr =
287 mpi_coredump->serdes2_xfi_hss_tx;
288 indirect_ptr =
289 mpi_coredump->serdes_xfi_hss_tx;
290 } else {
291 direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
292 indirect_ptr =
293 mpi_coredump->serdes2_xfi_hss_tx;
294 }
295 for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
296 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
297 xfi_direct_valid, xfi_indirect_valid);
298
299 /* Get XAUI_XFI_HSS_RX register block. */
300 if (qdev->func & 1) {
301 direct_ptr =
302 mpi_coredump->serdes2_xfi_hss_rx;
303 indirect_ptr =
304 mpi_coredump->serdes_xfi_hss_rx;
305 } else {
306 direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
307 indirect_ptr =
308 mpi_coredump->serdes2_xfi_hss_rx;
309 }
310
311 for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
312 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
313 xfi_direct_valid, xfi_indirect_valid);
314
315
316 /* Get XAUI_XFI_HSS_PLL register block. */
317 if (qdev->func & 1) {
318 direct_ptr =
319 mpi_coredump->serdes2_xfi_hss_pll;
320 indirect_ptr =
321 mpi_coredump->serdes_xfi_hss_pll;
322 } else {
323 direct_ptr =
324 mpi_coredump->serdes_xfi_hss_pll;
325 indirect_ptr =
326 mpi_coredump->serdes2_xfi_hss_pll;
327 }
328 for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
329 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
330 xfi_direct_valid, xfi_indirect_valid);
331 return 0;
332}
333
334static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
335 u32 *data)
336{
337 int status = 0;
338
339 /* wait for reg to come ready */
340 status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
341 XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
342 if (status)
343 goto exit;
344
345 /* set up for reg read */
346 ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
347
348 /* wait for reg to come ready */
349 status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
350 XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
351 if (status)
352 goto exit;
353
354 /* get the data */
355 *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
356exit:
357 return status;
358}
359
360/* Read the 400 xgmac control/statistics registers
361 * skipping unused locations.
362 */
363static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf,
364 unsigned int other_function)
365{
366 int status = 0;
367 int i;
368
369 for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
370 /* We're reading 400 xgmac registers, but we filter out
371 * serveral locations that are non-responsive to reads.
372 */
373 if ((i == 0x00000114) ||
374 (i == 0x00000118) ||
375 (i == 0x0000013c) ||
376 (i == 0x00000140) ||
377 (i > 0x00000150 && i < 0x000001fc) ||
378 (i > 0x00000278 && i < 0x000002a0) ||
379 (i > 0x000002c0 && i < 0x000002cf) ||
380 (i > 0x000002dc && i < 0x000002f0) ||
381 (i > 0x000003c8 && i < 0x00000400) ||
382 (i > 0x00000400 && i < 0x00000410) ||
383 (i > 0x00000410 && i < 0x00000420) ||
384 (i > 0x00000420 && i < 0x00000430) ||
385 (i > 0x00000430 && i < 0x00000440) ||
386 (i > 0x00000440 && i < 0x00000450) ||
387 (i > 0x00000450 && i < 0x00000500) ||
388 (i > 0x0000054c && i < 0x00000568) ||
389 (i > 0x000005c8 && i < 0x00000600)) {
390 if (other_function)
391 status =
392 ql_read_other_func_xgmac_reg(qdev, i, buf);
393 else
394 status = ql_read_xgmac_reg(qdev, i, buf);
395
396 if (status)
397 *buf = 0xdeadbeef;
398 break;
399 }
400 }
401 return status;
402}
3 403
4static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf) 404static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
5{ 405{
@@ -43,8 +443,8 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
43 status = ql_get_mac_addr_reg(qdev, 443 status = ql_get_mac_addr_reg(qdev,
44 MAC_ADDR_TYPE_CAM_MAC, i, value); 444 MAC_ADDR_TYPE_CAM_MAC, i, value);
45 if (status) { 445 if (status) {
46 QPRINTK(qdev, DRV, ERR, 446 netif_err(qdev, drv, qdev->ndev,
47 "Failed read of mac index register.\n"); 447 "Failed read of mac index register.\n");
48 goto err; 448 goto err;
49 } 449 }
50 *buf++ = value[0]; /* lower MAC address */ 450 *buf++ = value[0]; /* lower MAC address */
@@ -55,8 +455,8 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
55 status = ql_get_mac_addr_reg(qdev, 455 status = ql_get_mac_addr_reg(qdev,
56 MAC_ADDR_TYPE_MULTI_MAC, i, value); 456 MAC_ADDR_TYPE_MULTI_MAC, i, value);
57 if (status) { 457 if (status) {
58 QPRINTK(qdev, DRV, ERR, 458 netif_err(qdev, drv, qdev->ndev,
59 "Failed read of mac index register.\n"); 459 "Failed read of mac index register.\n");
60 goto err; 460 goto err;
61 } 461 }
62 *buf++ = value[0]; /* lower Mcast address */ 462 *buf++ = value[0]; /* lower Mcast address */
@@ -79,8 +479,8 @@ static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf)
79 for (i = 0; i < 16; i++) { 479 for (i = 0; i < 16; i++) {
80 status = ql_get_routing_reg(qdev, i, &value); 480 status = ql_get_routing_reg(qdev, i, &value);
81 if (status) { 481 if (status) {
82 QPRINTK(qdev, DRV, ERR, 482 netif_err(qdev, drv, qdev->ndev,
83 "Failed read of routing index register.\n"); 483 "Failed read of routing index register.\n");
84 goto err; 484 goto err;
85 } else { 485 } else {
86 *buf++ = value; 486 *buf++ = value;
@@ -91,6 +491,226 @@ err:
91 return status; 491 return status;
92} 492}
93 493
494/* Read the MPI Processor shadow registers */
495static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 * buf)
496{
497 u32 i;
498 int status;
499
500 for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
501 status = ql_write_mpi_reg(qdev, RISC_124,
502 (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
503 if (status)
504 goto end;
505 status = ql_read_mpi_reg(qdev, RISC_127, buf);
506 if (status)
507 goto end;
508 }
509end:
510 return status;
511}
512
513/* Read the MPI Processor core registers */
514static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf,
515 u32 offset, u32 count)
516{
517 int i, status = 0;
518 for (i = 0; i < count; i++, buf++) {
519 status = ql_read_mpi_reg(qdev, offset + i, buf);
520 if (status)
521 return status;
522 }
523 return status;
524}
525
526/* Read the ASIC probe dump */
527static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
528 u32 valid, u32 *buf)
529{
530 u32 module, mux_sel, probe, lo_val, hi_val;
531
532 for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
533 if (!((valid >> module) & 1))
534 continue;
535 for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
536 probe = clock
537 | PRB_MX_ADDR_ARE
538 | mux_sel
539 | (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
540 ql_write32(qdev, PRB_MX_ADDR, probe);
541 lo_val = ql_read32(qdev, PRB_MX_DATA);
542 if (mux_sel == 0) {
543 *buf = probe;
544 buf++;
545 }
546 probe |= PRB_MX_ADDR_UP;
547 ql_write32(qdev, PRB_MX_ADDR, probe);
548 hi_val = ql_read32(qdev, PRB_MX_DATA);
549 *buf = lo_val;
550 buf++;
551 *buf = hi_val;
552 buf++;
553 }
554 }
555 return buf;
556}
557
558static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
559{
560 /* First we have to enable the probe mux */
561 ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
562 buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
563 PRB_MX_ADDR_VALID_SYS_MOD, buf);
564 buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
565 PRB_MX_ADDR_VALID_PCI_MOD, buf);
566 buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
567 PRB_MX_ADDR_VALID_XGM_MOD, buf);
568 buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
569 PRB_MX_ADDR_VALID_FC_MOD, buf);
570 return 0;
571
572}
573
574/* Read out the routing index registers */
575static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
576{
577 int status;
578 u32 type, index, index_max;
579 u32 result_index;
580 u32 result_data;
581 u32 val;
582
583 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
584 if (status)
585 return status;
586
587 for (type = 0; type < 4; type++) {
588 if (type < 2)
589 index_max = 8;
590 else
591 index_max = 16;
592 for (index = 0; index < index_max; index++) {
593 val = RT_IDX_RS
594 | (type << RT_IDX_TYPE_SHIFT)
595 | (index << RT_IDX_IDX_SHIFT);
596 ql_write32(qdev, RT_IDX, val);
597 result_index = 0;
598 while ((result_index & RT_IDX_MR) == 0)
599 result_index = ql_read32(qdev, RT_IDX);
600 result_data = ql_read32(qdev, RT_DATA);
601 *buf = type;
602 buf++;
603 *buf = index;
604 buf++;
605 *buf = result_index;
606 buf++;
607 *buf = result_data;
608 buf++;
609 }
610 }
611 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
612 return status;
613}
614
615/* Read out the MAC protocol registers */
616static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
617{
618 u32 result_index, result_data;
619 u32 type;
620 u32 index;
621 u32 offset;
622 u32 val;
623 u32 initial_val = MAC_ADDR_RS;
624 u32 max_index;
625 u32 max_offset;
626
627 for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
628 switch (type) {
629
630 case 0: /* CAM */
631 initial_val |= MAC_ADDR_ADR;
632 max_index = MAC_ADDR_MAX_CAM_ENTRIES;
633 max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
634 break;
635 case 1: /* Multicast MAC Address */
636 max_index = MAC_ADDR_MAX_CAM_WCOUNT;
637 max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
638 break;
639 case 2: /* VLAN filter mask */
640 case 3: /* MC filter mask */
641 max_index = MAC_ADDR_MAX_CAM_WCOUNT;
642 max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
643 break;
644 case 4: /* FC MAC addresses */
645 max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
646 max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
647 break;
648 case 5: /* Mgmt MAC addresses */
649 max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
650 max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
651 break;
652 case 6: /* Mgmt VLAN addresses */
653 max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
654 max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
655 break;
656 case 7: /* Mgmt IPv4 address */
657 max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
658 max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
659 break;
660 case 8: /* Mgmt IPv6 address */
661 max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
662 max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
663 break;
664 case 9: /* Mgmt TCP/UDP Dest port */
665 max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
666 max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
667 break;
668 default:
669 printk(KERN_ERR"Bad type!!! 0x%08x\n", type);
670 max_index = 0;
671 max_offset = 0;
672 break;
673 }
674 for (index = 0; index < max_index; index++) {
675 for (offset = 0; offset < max_offset; offset++) {
676 val = initial_val
677 | (type << MAC_ADDR_TYPE_SHIFT)
678 | (index << MAC_ADDR_IDX_SHIFT)
679 | (offset);
680 ql_write32(qdev, MAC_ADDR_IDX, val);
681 result_index = 0;
682 while ((result_index & MAC_ADDR_MR) == 0) {
683 result_index = ql_read32(qdev,
684 MAC_ADDR_IDX);
685 }
686 result_data = ql_read32(qdev, MAC_ADDR_DATA);
687 *buf = result_index;
688 buf++;
689 *buf = result_data;
690 buf++;
691 }
692 }
693 }
694}
695
696static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
697{
698 u32 func_num, reg, reg_val;
699 int status;
700
701 for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
702 reg = MPI_NIC_REG_BLOCK
703 | (func_num << MPI_NIC_FUNCTION_SHIFT)
704 | (SEM / 4);
705 status = ql_read_mpi_reg(qdev, reg, &reg_val);
706 *buf = reg_val;
707 /* if the read failed then dead fill the element. */
708 if (!status)
709 *buf = 0xdeadbeef;
710 buf++;
711 }
712}
713
94/* Create a coredump segment header */ 714/* Create a coredump segment header */
95static void ql_build_coredump_seg_header( 715static void ql_build_coredump_seg_header(
96 struct mpi_coredump_segment_header *seg_hdr, 716 struct mpi_coredump_segment_header *seg_hdr,
@@ -103,6 +723,526 @@ static void ql_build_coredump_seg_header(
103 memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1); 723 memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
104} 724}
105 725
726/*
727 * This function should be called when a coredump / probedump
728 * is to be extracted from the HBA. It is assumed there is a
729 * qdev structure that contains the base address of the register
730 * space for this function as well as a coredump structure that
731 * will contain the dump.
732 */
733int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
734{
735 int status;
736 int i;
737
738 if (!mpi_coredump) {
739 netif_err(qdev, drv, qdev->ndev, "No memory available.\n");
740 return -ENOMEM;
741 }
742
743 /* Try to get the spinlock, but dont worry if
744 * it isn't available. If the firmware died it
745 * might be holding the sem.
746 */
747 ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
748
749 status = ql_pause_mpi_risc(qdev);
750 if (status) {
751 netif_err(qdev, drv, qdev->ndev,
752 "Failed RISC pause. Status = 0x%.08x\n", status);
753 goto err;
754 }
755
756 /* Insert the global header */
757 memset(&(mpi_coredump->mpi_global_header), 0,
758 sizeof(struct mpi_coredump_global_header));
759 mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
760 mpi_coredump->mpi_global_header.headerSize =
761 sizeof(struct mpi_coredump_global_header);
762 mpi_coredump->mpi_global_header.imageSize =
763 sizeof(struct ql_mpi_coredump);
764 memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
765 sizeof(mpi_coredump->mpi_global_header.idString));
766
767 /* Get generic NIC reg dump */
768 ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
769 NIC1_CONTROL_SEG_NUM,
770 sizeof(struct mpi_coredump_segment_header) +
771 sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
772
773 ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
774 NIC2_CONTROL_SEG_NUM,
775 sizeof(struct mpi_coredump_segment_header) +
776 sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
777
778 /* Get XGMac registers. (Segment 18, Rev C. step 21) */
779 ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
780 NIC1_XGMAC_SEG_NUM,
781 sizeof(struct mpi_coredump_segment_header) +
782 sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
783
784 ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
785 NIC2_XGMAC_SEG_NUM,
786 sizeof(struct mpi_coredump_segment_header) +
787 sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
788
789 if (qdev->func & 1) {
790 /* Odd means our function is NIC 2 */
791 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
792 mpi_coredump->nic2_regs[i] =
793 ql_read32(qdev, i * sizeof(u32));
794
795 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
796 mpi_coredump->nic_regs[i] =
797 ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
798
799 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
800 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
801 } else {
802 /* Even means our function is NIC 1 */
803 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
804 mpi_coredump->nic_regs[i] =
805 ql_read32(qdev, i * sizeof(u32));
806 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
807 mpi_coredump->nic2_regs[i] =
808 ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
809
810 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
811 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
812 }
813
814 /* Rev C. Step 20a */
815 ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
816 XAUI_AN_SEG_NUM,
817 sizeof(struct mpi_coredump_segment_header) +
818 sizeof(mpi_coredump->serdes_xaui_an),
819 "XAUI AN Registers");
820
821 /* Rev C. Step 20b */
822 ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
823 XAUI_HSS_PCS_SEG_NUM,
824 sizeof(struct mpi_coredump_segment_header) +
825 sizeof(mpi_coredump->serdes_xaui_hss_pcs),
826 "XAUI HSS PCS Registers");
827
828 ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
829 sizeof(struct mpi_coredump_segment_header) +
830 sizeof(mpi_coredump->serdes_xfi_an),
831 "XFI AN Registers");
832
833 ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
834 XFI_TRAIN_SEG_NUM,
835 sizeof(struct mpi_coredump_segment_header) +
836 sizeof(mpi_coredump->serdes_xfi_train),
837 "XFI TRAIN Registers");
838
839 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
840 XFI_HSS_PCS_SEG_NUM,
841 sizeof(struct mpi_coredump_segment_header) +
842 sizeof(mpi_coredump->serdes_xfi_hss_pcs),
843 "XFI HSS PCS Registers");
844
845 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
846 XFI_HSS_TX_SEG_NUM,
847 sizeof(struct mpi_coredump_segment_header) +
848 sizeof(mpi_coredump->serdes_xfi_hss_tx),
849 "XFI HSS TX Registers");
850
851 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
852 XFI_HSS_RX_SEG_NUM,
853 sizeof(struct mpi_coredump_segment_header) +
854 sizeof(mpi_coredump->serdes_xfi_hss_rx),
855 "XFI HSS RX Registers");
856
857 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
858 XFI_HSS_PLL_SEG_NUM,
859 sizeof(struct mpi_coredump_segment_header) +
860 sizeof(mpi_coredump->serdes_xfi_hss_pll),
861 "XFI HSS PLL Registers");
862
863 ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
864 XAUI2_AN_SEG_NUM,
865 sizeof(struct mpi_coredump_segment_header) +
866 sizeof(mpi_coredump->serdes2_xaui_an),
867 "XAUI2 AN Registers");
868
869 ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
870 XAUI2_HSS_PCS_SEG_NUM,
871 sizeof(struct mpi_coredump_segment_header) +
872 sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
873 "XAUI2 HSS PCS Registers");
874
875 ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
876 XFI2_AN_SEG_NUM,
877 sizeof(struct mpi_coredump_segment_header) +
878 sizeof(mpi_coredump->serdes2_xfi_an),
879 "XFI2 AN Registers");
880
881 ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
882 XFI2_TRAIN_SEG_NUM,
883 sizeof(struct mpi_coredump_segment_header) +
884 sizeof(mpi_coredump->serdes2_xfi_train),
885 "XFI2 TRAIN Registers");
886
887 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
888 XFI2_HSS_PCS_SEG_NUM,
889 sizeof(struct mpi_coredump_segment_header) +
890 sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
891 "XFI2 HSS PCS Registers");
892
893 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
894 XFI2_HSS_TX_SEG_NUM,
895 sizeof(struct mpi_coredump_segment_header) +
896 sizeof(mpi_coredump->serdes2_xfi_hss_tx),
897 "XFI2 HSS TX Registers");
898
899 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
900 XFI2_HSS_RX_SEG_NUM,
901 sizeof(struct mpi_coredump_segment_header) +
902 sizeof(mpi_coredump->serdes2_xfi_hss_rx),
903 "XFI2 HSS RX Registers");
904
905 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
906 XFI2_HSS_PLL_SEG_NUM,
907 sizeof(struct mpi_coredump_segment_header) +
908 sizeof(mpi_coredump->serdes2_xfi_hss_pll),
909 "XFI2 HSS PLL Registers");
910
911 status = ql_get_serdes_regs(qdev, mpi_coredump);
912 if (status) {
913 netif_err(qdev, drv, qdev->ndev,
914 "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
915 status);
916 goto err;
917 }
918
919 ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
920 CORE_SEG_NUM,
921 sizeof(mpi_coredump->core_regs_seg_hdr) +
922 sizeof(mpi_coredump->mpi_core_regs) +
923 sizeof(mpi_coredump->mpi_core_sh_regs),
924 "Core Registers");
925
926 /* Get the MPI Core Registers */
927 status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
928 MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
929 if (status)
930 goto err;
931 /* Get the 16 MPI shadow registers */
932 status = ql_get_mpi_shadow_regs(qdev,
933 &mpi_coredump->mpi_core_sh_regs[0]);
934 if (status)
935 goto err;
936
937 /* Get the Test Logic Registers */
938 ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
939 TEST_LOGIC_SEG_NUM,
940 sizeof(struct mpi_coredump_segment_header)
941 + sizeof(mpi_coredump->test_logic_regs),
942 "Test Logic Regs");
943 status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
944 TEST_REGS_ADDR, TEST_REGS_CNT);
945 if (status)
946 goto err;
947
948 /* Get the RMII Registers */
949 ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
950 RMII_SEG_NUM,
951 sizeof(struct mpi_coredump_segment_header)
952 + sizeof(mpi_coredump->rmii_regs),
953 "RMII Registers");
954 status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
955 RMII_REGS_ADDR, RMII_REGS_CNT);
956 if (status)
957 goto err;
958
959 /* Get the FCMAC1 Registers */
960 ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
961 FCMAC1_SEG_NUM,
962 sizeof(struct mpi_coredump_segment_header)
963 + sizeof(mpi_coredump->fcmac1_regs),
964 "FCMAC1 Registers");
965 status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
966 FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
967 if (status)
968 goto err;
969
970 /* Get the FCMAC2 Registers */
971
972 ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
973 FCMAC2_SEG_NUM,
974 sizeof(struct mpi_coredump_segment_header)
975 + sizeof(mpi_coredump->fcmac2_regs),
976 "FCMAC2 Registers");
977
978 status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
979 FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
980 if (status)
981 goto err;
982
983 /* Get the FC1 MBX Registers */
984 ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
985 FC1_MBOX_SEG_NUM,
986 sizeof(struct mpi_coredump_segment_header)
987 + sizeof(mpi_coredump->fc1_mbx_regs),
988 "FC1 MBox Regs");
989 status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
990 FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
991 if (status)
992 goto err;
993
994 /* Get the IDE Registers */
995 ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
996 IDE_SEG_NUM,
997 sizeof(struct mpi_coredump_segment_header)
998 + sizeof(mpi_coredump->ide_regs),
999 "IDE Registers");
1000 status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
1001 IDE_REGS_ADDR, IDE_REGS_CNT);
1002 if (status)
1003 goto err;
1004
1005 /* Get the NIC1 MBX Registers */
1006 ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
1007 NIC1_MBOX_SEG_NUM,
1008 sizeof(struct mpi_coredump_segment_header)
1009 + sizeof(mpi_coredump->nic1_mbx_regs),
1010 "NIC1 MBox Regs");
1011 status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
1012 NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
1013 if (status)
1014 goto err;
1015
1016 /* Get the SMBus Registers */
1017 ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
1018 SMBUS_SEG_NUM,
1019 sizeof(struct mpi_coredump_segment_header)
1020 + sizeof(mpi_coredump->smbus_regs),
1021 "SMBus Registers");
1022 status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
1023 SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
1024 if (status)
1025 goto err;
1026
1027 /* Get the FC2 MBX Registers */
1028 ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
1029 FC2_MBOX_SEG_NUM,
1030 sizeof(struct mpi_coredump_segment_header)
1031 + sizeof(mpi_coredump->fc2_mbx_regs),
1032 "FC2 MBox Regs");
1033 status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
1034 FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
1035 if (status)
1036 goto err;
1037
1038 /* Get the NIC2 MBX Registers */
1039 ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
1040 NIC2_MBOX_SEG_NUM,
1041 sizeof(struct mpi_coredump_segment_header)
1042 + sizeof(mpi_coredump->nic2_mbx_regs),
1043 "NIC2 MBox Regs");
1044 status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
1045 NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
1046 if (status)
1047 goto err;
1048
1049 /* Get the I2C Registers */
1050 ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
1051 I2C_SEG_NUM,
1052 sizeof(struct mpi_coredump_segment_header)
1053 + sizeof(mpi_coredump->i2c_regs),
1054 "I2C Registers");
1055 status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
1056 I2C_REGS_ADDR, I2C_REGS_CNT);
1057 if (status)
1058 goto err;
1059
1060 /* Get the MEMC Registers */
1061 ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
1062 MEMC_SEG_NUM,
1063 sizeof(struct mpi_coredump_segment_header)
1064 + sizeof(mpi_coredump->memc_regs),
1065 "MEMC Registers");
1066 status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
1067 MEMC_REGS_ADDR, MEMC_REGS_CNT);
1068 if (status)
1069 goto err;
1070
1071 /* Get the PBus Registers */
1072 ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
1073 PBUS_SEG_NUM,
1074 sizeof(struct mpi_coredump_segment_header)
1075 + sizeof(mpi_coredump->pbus_regs),
1076 "PBUS Registers");
1077 status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
1078 PBUS_REGS_ADDR, PBUS_REGS_CNT);
1079 if (status)
1080 goto err;
1081
1082 /* Get the MDE Registers */
1083 ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
1084 MDE_SEG_NUM,
1085 sizeof(struct mpi_coredump_segment_header)
1086 + sizeof(mpi_coredump->mde_regs),
1087 "MDE Registers");
1088 status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
1089 MDE_REGS_ADDR, MDE_REGS_CNT);
1090 if (status)
1091 goto err;
1092
1093 ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
1094 MISC_NIC_INFO_SEG_NUM,
1095 sizeof(struct mpi_coredump_segment_header)
1096 + sizeof(mpi_coredump->misc_nic_info),
1097 "MISC NIC INFO");
1098 mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
1099 mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
1100 mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
1101 mpi_coredump->misc_nic_info.function = qdev->func;
1102
1103 /* Segment 31 */
1104 /* Get indexed register values. */
1105 ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
1106 INTR_STATES_SEG_NUM,
1107 sizeof(struct mpi_coredump_segment_header)
1108 + sizeof(mpi_coredump->intr_states),
1109 "INTR States");
1110 ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
1111
1112 ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
1113 CAM_ENTRIES_SEG_NUM,
1114 sizeof(struct mpi_coredump_segment_header)
1115 + sizeof(mpi_coredump->cam_entries),
1116 "CAM Entries");
1117 status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
1118 if (status)
1119 goto err;
1120
1121 ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
1122 ROUTING_WORDS_SEG_NUM,
1123 sizeof(struct mpi_coredump_segment_header)
1124 + sizeof(mpi_coredump->nic_routing_words),
1125 "Routing Words");
1126 status = ql_get_routing_entries(qdev,
1127 &mpi_coredump->nic_routing_words[0]);
1128 if (status)
1129 goto err;
1130
1131 /* Segment 34 (Rev C. step 23) */
1132 ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
1133 ETS_SEG_NUM,
1134 sizeof(struct mpi_coredump_segment_header)
1135 + sizeof(mpi_coredump->ets),
1136 "ETS Registers");
1137 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
1138 if (status)
1139 goto err;
1140
1141 ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
1142 PROBE_DUMP_SEG_NUM,
1143 sizeof(struct mpi_coredump_segment_header)
1144 + sizeof(mpi_coredump->probe_dump),
1145 "Probe Dump");
1146 ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
1147
1148 ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
1149 ROUTING_INDEX_SEG_NUM,
1150 sizeof(struct mpi_coredump_segment_header)
1151 + sizeof(mpi_coredump->routing_regs),
1152 "Routing Regs");
1153 status = ql_get_routing_index_registers(qdev,
1154 &mpi_coredump->routing_regs[0]);
1155 if (status)
1156 goto err;
1157
1158 ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
1159 MAC_PROTOCOL_SEG_NUM,
1160 sizeof(struct mpi_coredump_segment_header)
1161 + sizeof(mpi_coredump->mac_prot_regs),
1162 "MAC Prot Regs");
1163 ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
1164
1165 /* Get the semaphore registers for all 5 functions */
1166 ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
1167 SEM_REGS_SEG_NUM,
1168 sizeof(struct mpi_coredump_segment_header) +
1169 sizeof(mpi_coredump->sem_regs), "Sem Registers");
1170
1171 ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
1172
1173 /* Prevent the mpi restarting while we dump the memory.*/
1174 ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
1175
1176 /* clear the pause */
1177 status = ql_unpause_mpi_risc(qdev);
1178 if (status) {
1179 netif_err(qdev, drv, qdev->ndev,
1180 "Failed RISC unpause. Status = 0x%.08x\n", status);
1181 goto err;
1182 }
1183
1184 /* Reset the RISC so we can dump RAM */
1185 status = ql_hard_reset_mpi_risc(qdev);
1186 if (status) {
1187 netif_err(qdev, drv, qdev->ndev,
1188 "Failed RISC reset. Status = 0x%.08x\n", status);
1189 goto err;
1190 }
1191
1192 ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
1193 WCS_RAM_SEG_NUM,
1194 sizeof(struct mpi_coredump_segment_header)
1195 + sizeof(mpi_coredump->code_ram),
1196 "WCS RAM");
1197 status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
1198 CODE_RAM_ADDR, CODE_RAM_CNT);
1199 if (status) {
1200 netif_err(qdev, drv, qdev->ndev,
1201 "Failed Dump of CODE RAM. Status = 0x%.08x\n",
1202 status);
1203 goto err;
1204 }
1205
1206 /* Insert the segment header */
1207 ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
1208 MEMC_RAM_SEG_NUM,
1209 sizeof(struct mpi_coredump_segment_header)
1210 + sizeof(mpi_coredump->memc_ram),
1211 "MEMC RAM");
1212 status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
1213 MEMC_RAM_ADDR, MEMC_RAM_CNT);
1214 if (status) {
1215 netif_err(qdev, drv, qdev->ndev,
1216 "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
1217 status);
1218 goto err;
1219 }
1220err:
1221 ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
1222 return status;
1223
1224}
1225
1226static void ql_get_core_dump(struct ql_adapter *qdev)
1227{
1228 if (!ql_own_firmware(qdev)) {
1229 netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
1230 return;
1231 }
1232
1233 if (!netif_running(qdev->ndev)) {
1234 netif_err(qdev, ifup, qdev->ndev,
1235 "Force Coredump can only be done from interface that is up.\n");
1236 return;
1237 }
1238
1239 if (ql_mb_sys_err(qdev)) {
1240 netif_err(qdev, ifup, qdev->ndev,
1241 "Fail force coredump with ql_mb_sys_err().\n");
1242 return;
1243 }
1244}
1245
106void ql_gen_reg_dump(struct ql_adapter *qdev, 1246void ql_gen_reg_dump(struct ql_adapter *qdev,
107 struct ql_reg_dump *mpi_coredump) 1247 struct ql_reg_dump *mpi_coredump)
108{ 1248{
@@ -178,6 +1318,37 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
178 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); 1318 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
179 if (status) 1319 if (status)
180 return; 1320 return;
1321
1322 if (test_bit(QL_FRC_COREDUMP, &qdev->flags))
1323 ql_get_core_dump(qdev);
1324}
1325
1326/* Coredump to messages log file using separate worker thread */
1327void ql_mpi_core_to_log(struct work_struct *work)
1328{
1329 struct ql_adapter *qdev =
1330 container_of(work, struct ql_adapter, mpi_core_to_log.work);
1331 u32 *tmp, count;
1332 int i;
1333
1334 count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
1335 tmp = (u32 *)qdev->mpi_coredump;
1336 netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
1337 "Core is dumping to log file!\n");
1338
1339 for (i = 0; i < count; i += 8) {
1340 printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x "
1341 "%.08x %.08x %.08x \n", i,
1342 tmp[i + 0],
1343 tmp[i + 1],
1344 tmp[i + 2],
1345 tmp[i + 3],
1346 tmp[i + 4],
1347 tmp[i + 5],
1348 tmp[i + 6],
1349 tmp[i + 7]);
1350 msleep(5);
1351 }
181} 1352}
182 1353
183#ifdef QL_REG_DUMP 1354#ifdef QL_REG_DUMP
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 058fa0a48c6f..05b8bde9980d 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -67,8 +67,8 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
67 status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), 67 status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
68 CFG_LCQ, rx_ring->cq_id); 68 CFG_LCQ, rx_ring->cq_id);
69 if (status) { 69 if (status) {
70 QPRINTK(qdev, IFUP, ERR, 70 netif_err(qdev, ifup, qdev->ndev,
71 "Failed to load CQICB.\n"); 71 "Failed to load CQICB.\n");
72 goto exit; 72 goto exit;
73 } 73 }
74 } 74 }
@@ -89,8 +89,8 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
89 status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), 89 status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
90 CFG_LCQ, rx_ring->cq_id); 90 CFG_LCQ, rx_ring->cq_id);
91 if (status) { 91 if (status) {
92 QPRINTK(qdev, IFUP, ERR, 92 netif_err(qdev, ifup, qdev->ndev,
93 "Failed to load CQICB.\n"); 93 "Failed to load CQICB.\n");
94 goto exit; 94 goto exit;
95 } 95 }
96 } 96 }
@@ -107,8 +107,8 @@ static void ql_update_stats(struct ql_adapter *qdev)
107 107
108 spin_lock(&qdev->stats_lock); 108 spin_lock(&qdev->stats_lock);
109 if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { 109 if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
110 QPRINTK(qdev, DRV, ERR, 110 netif_err(qdev, drv, qdev->ndev,
111 "Couldn't get xgmac sem.\n"); 111 "Couldn't get xgmac sem.\n");
112 goto quit; 112 goto quit;
113 } 113 }
114 /* 114 /*
@@ -116,8 +116,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
116 */ 116 */
117 for (i = 0x200; i < 0x280; i += 8) { 117 for (i = 0x200; i < 0x280; i += 8) {
118 if (ql_read_xgmac_reg64(qdev, i, &data)) { 118 if (ql_read_xgmac_reg64(qdev, i, &data)) {
119 QPRINTK(qdev, DRV, ERR, 119 netif_err(qdev, drv, qdev->ndev,
120 "Error reading status register 0x%.04x.\n", i); 120 "Error reading status register 0x%.04x.\n",
121 i);
121 goto end; 122 goto end;
122 } else 123 } else
123 *iter = data; 124 *iter = data;
@@ -129,8 +130,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
129 */ 130 */
130 for (i = 0x300; i < 0x3d0; i += 8) { 131 for (i = 0x300; i < 0x3d0; i += 8) {
131 if (ql_read_xgmac_reg64(qdev, i, &data)) { 132 if (ql_read_xgmac_reg64(qdev, i, &data)) {
132 QPRINTK(qdev, DRV, ERR, 133 netif_err(qdev, drv, qdev->ndev,
133 "Error reading status register 0x%.04x.\n", i); 134 "Error reading status register 0x%.04x.\n",
135 i);
134 goto end; 136 goto end;
135 } else 137 } else
136 *iter = data; 138 *iter = data;
@@ -142,8 +144,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
142 */ 144 */
143 for (i = 0x500; i < 0x540; i += 8) { 145 for (i = 0x500; i < 0x540; i += 8) {
144 if (ql_read_xgmac_reg64(qdev, i, &data)) { 146 if (ql_read_xgmac_reg64(qdev, i, &data)) {
145 QPRINTK(qdev, DRV, ERR, 147 netif_err(qdev, drv, qdev->ndev,
146 "Error reading status register 0x%.04x.\n", i); 148 "Error reading status register 0x%.04x.\n",
149 i);
147 goto end; 150 goto end;
148 } else 151 } else
149 *iter = data; 152 *iter = data;
@@ -155,8 +158,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
155 */ 158 */
156 for (i = 0x568; i < 0x5a8; i += 8) { 159 for (i = 0x568; i < 0x5a8; i += 8) {
157 if (ql_read_xgmac_reg64(qdev, i, &data)) { 160 if (ql_read_xgmac_reg64(qdev, i, &data)) {
158 QPRINTK(qdev, DRV, ERR, 161 netif_err(qdev, drv, qdev->ndev,
159 "Error reading status register 0x%.04x.\n", i); 162 "Error reading status register 0x%.04x.\n",
163 i);
160 goto end; 164 goto end;
161 } else 165 } else
162 *iter = data; 166 *iter = data;
@@ -167,8 +171,8 @@ static void ql_update_stats(struct ql_adapter *qdev)
167 * Get RX NIC FIFO DROP statistics. 171 * Get RX NIC FIFO DROP statistics.
168 */ 172 */
169 if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) { 173 if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
170 QPRINTK(qdev, DRV, ERR, 174 netif_err(qdev, drv, qdev->ndev,
171 "Error reading status register 0x%.04x.\n", i); 175 "Error reading status register 0x%.04x.\n", i);
172 goto end; 176 goto end;
173 } else 177 } else
174 *iter = data; 178 *iter = data;
@@ -396,14 +400,13 @@ static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
396 return -EINVAL; 400 return -EINVAL;
397 qdev->wol = wol->wolopts; 401 qdev->wol = wol->wolopts;
398 402
399 QPRINTK(qdev, DRV, INFO, "Set wol option 0x%x on %s\n", 403 netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
400 qdev->wol, ndev->name);
401 if (!qdev->wol) { 404 if (!qdev->wol) {
402 u32 wol = 0; 405 u32 wol = 0;
403 status = ql_mb_wol_mode(qdev, wol); 406 status = ql_mb_wol_mode(qdev, wol);
404 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n", 407 netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n",
405 (status == 0) ? "cleared sucessfully" : "clear failed", 408 status == 0 ? "cleared sucessfully" : "clear failed",
406 wol, qdev->ndev->name); 409 wol);
407 } 410 }
408 411
409 return 0; 412 return 0;
@@ -500,7 +503,8 @@ static int ql_run_loopback_test(struct ql_adapter *qdev)
500 return -EPIPE; 503 return -EPIPE;
501 atomic_inc(&qdev->lb_count); 504 atomic_inc(&qdev->lb_count);
502 } 505 }
503 506 /* Give queue time to settle before testing results. */
507 msleep(2);
504 ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128); 508 ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
505 return atomic_read(&qdev->lb_count) ? -EIO : 0; 509 return atomic_read(&qdev->lb_count) ? -EIO : 0;
506} 510}
@@ -533,9 +537,13 @@ static void ql_self_test(struct net_device *ndev,
533 data[0] = 0; 537 data[0] = 0;
534 } 538 }
535 clear_bit(QL_SELFTEST, &qdev->flags); 539 clear_bit(QL_SELFTEST, &qdev->flags);
540 /* Give link time to come up after
541 * port configuration changes.
542 */
543 msleep_interruptible(4 * 1000);
536 } else { 544 } else {
537 QPRINTK(qdev, DRV, ERR, 545 netif_err(qdev, drv, qdev->ndev,
538 "%s: is down, Loopback test will fail.\n", ndev->name); 546 "is down, Loopback test will fail.\n");
539 eth_test->flags |= ETH_TEST_FL_FAILED; 547 eth_test->flags |= ETH_TEST_FL_FAILED;
540 } 548 }
541} 549}
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 894a7c84faef..c26ec5d740f6 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -73,7 +73,19 @@ static int qlge_irq_type = MSIX_IRQ;
73module_param(qlge_irq_type, int, MSIX_IRQ); 73module_param(qlge_irq_type, int, MSIX_IRQ);
74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); 74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75 75
76static struct pci_device_id qlge_pci_tbl[] __devinitdata = { 76static int qlge_mpi_coredump;
77module_param(qlge_mpi_coredump, int, 0);
78MODULE_PARM_DESC(qlge_mpi_coredump,
79 "Option to enable MPI firmware dump. "
80 "Default is OFF - Do Not allocate memory. ");
81
82static int qlge_force_coredump;
83module_param(qlge_force_coredump, int, 0);
84MODULE_PARM_DESC(qlge_force_coredump,
85 "Option to allow force of firmware core dump. "
86 "Default is OFF - Do not allow.");
87
88static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
77 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)}, 89 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
78 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)}, 90 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
79 /* required last entry */ 91 /* required last entry */
@@ -116,7 +128,7 @@ static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
116 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT; 128 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
117 break; 129 break;
118 default: 130 default:
119 QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n"); 131 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
120 return -EINVAL; 132 return -EINVAL;
121 } 133 }
122 134
@@ -156,17 +168,17 @@ int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
156 168
157 /* check for errors */ 169 /* check for errors */
158 if (temp & err_bit) { 170 if (temp & err_bit) {
159 QPRINTK(qdev, PROBE, ALERT, 171 netif_alert(qdev, probe, qdev->ndev,
160 "register 0x%.08x access error, value = 0x%.08x!.\n", 172 "register 0x%.08x access error, value = 0x%.08x!.\n",
161 reg, temp); 173 reg, temp);
162 return -EIO; 174 return -EIO;
163 } else if (temp & bit) 175 } else if (temp & bit)
164 return 0; 176 return 0;
165 udelay(UDELAY_DELAY); 177 udelay(UDELAY_DELAY);
166 count--; 178 count--;
167 } 179 }
168 QPRINTK(qdev, PROBE, ALERT, 180 netif_alert(qdev, probe, qdev->ndev,
169 "Timed out waiting for reg %x to come ready.\n", reg); 181 "Timed out waiting for reg %x to come ready.\n", reg);
170 return -ETIMEDOUT; 182 return -ETIMEDOUT;
171} 183}
172 184
@@ -209,7 +221,7 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
209 221
210 map = pci_map_single(qdev->pdev, ptr, size, direction); 222 map = pci_map_single(qdev->pdev, ptr, size, direction);
211 if (pci_dma_mapping_error(qdev->pdev, map)) { 223 if (pci_dma_mapping_error(qdev->pdev, map)) {
212 QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n"); 224 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
213 return -ENOMEM; 225 return -ENOMEM;
214 } 226 }
215 227
@@ -219,8 +231,8 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
219 231
220 status = ql_wait_cfg(qdev, bit); 232 status = ql_wait_cfg(qdev, bit);
221 if (status) { 233 if (status) {
222 QPRINTK(qdev, IFUP, ERR, 234 netif_err(qdev, ifup, qdev->ndev,
223 "Timed out waiting for CFG to come ready.\n"); 235 "Timed out waiting for CFG to come ready.\n");
224 goto exit; 236 goto exit;
225 } 237 }
226 238
@@ -301,8 +313,8 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
301 case MAC_ADDR_TYPE_VLAN: 313 case MAC_ADDR_TYPE_VLAN:
302 case MAC_ADDR_TYPE_MULTI_FLTR: 314 case MAC_ADDR_TYPE_MULTI_FLTR:
303 default: 315 default:
304 QPRINTK(qdev, IFUP, CRIT, 316 netif_crit(qdev, ifup, qdev->ndev,
305 "Address type %d not yet supported.\n", type); 317 "Address type %d not yet supported.\n", type);
306 status = -EPERM; 318 status = -EPERM;
307 } 319 }
308exit: 320exit:
@@ -359,12 +371,11 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
359 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | 371 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
360 (addr[5]); 372 (addr[5]);
361 373
362 QPRINTK(qdev, IFUP, DEBUG, 374 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
363 "Adding %s address %pM" 375 "Adding %s address %pM at index %d in the CAM.\n",
364 " at index %d in the CAM.\n", 376 type == MAC_ADDR_TYPE_MULTI_MAC ?
365 ((type == 377 "MULTICAST" : "UNICAST",
366 MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" : 378 addr, index);
367 "UNICAST"), addr, index);
368 379
369 status = 380 status =
370 ql_wait_reg_rdy(qdev, 381 ql_wait_reg_rdy(qdev,
@@ -414,9 +425,11 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
414 * addressing. It's either MAC_ADDR_E on or off. 425 * addressing. It's either MAC_ADDR_E on or off.
415 * That's bit-27 we're talking about. 426 * That's bit-27 we're talking about.
416 */ 427 */
417 QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n", 428 netif_info(qdev, ifup, qdev->ndev,
418 (enable_bit ? "Adding" : "Removing"), 429 "%s VLAN ID %d %s the CAM.\n",
419 index, (enable_bit ? "to" : "from")); 430 enable_bit ? "Adding" : "Removing",
431 index,
432 enable_bit ? "to" : "from");
420 433
421 status = 434 status =
422 ql_wait_reg_rdy(qdev, 435 ql_wait_reg_rdy(qdev,
@@ -431,8 +444,8 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
431 } 444 }
432 case MAC_ADDR_TYPE_MULTI_FLTR: 445 case MAC_ADDR_TYPE_MULTI_FLTR:
433 default: 446 default:
434 QPRINTK(qdev, IFUP, CRIT, 447 netif_crit(qdev, ifup, qdev->ndev,
435 "Address type %d not yet supported.\n", type); 448 "Address type %d not yet supported.\n", type);
436 status = -EPERM; 449 status = -EPERM;
437 } 450 }
438exit: 451exit:
@@ -450,17 +463,14 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
450 char *addr; 463 char *addr;
451 464
452 if (set) { 465 if (set) {
453 addr = &qdev->ndev->dev_addr[0]; 466 addr = &qdev->current_mac_addr[0];
454 QPRINTK(qdev, IFUP, DEBUG, 467 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
455 "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n", 468 "Set Mac addr %pM\n", addr);
456 addr[0], addr[1], addr[2], addr[3],
457 addr[4], addr[5]);
458 } else { 469 } else {
459 memset(zero_mac_addr, 0, ETH_ALEN); 470 memset(zero_mac_addr, 0, ETH_ALEN);
460 addr = &zero_mac_addr[0]; 471 addr = &zero_mac_addr[0];
461 QPRINTK(qdev, IFUP, DEBUG, 472 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
462 "Clearing MAC address on %s\n", 473 "Clearing MAC address\n");
463 qdev->ndev->name);
464 } 474 }
465 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 475 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
466 if (status) 476 if (status)
@@ -469,23 +479,21 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
469 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); 479 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
470 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 480 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
471 if (status) 481 if (status)
472 QPRINTK(qdev, IFUP, ERR, "Failed to init mac " 482 netif_err(qdev, ifup, qdev->ndev,
473 "address.\n"); 483 "Failed to init mac address.\n");
474 return status; 484 return status;
475} 485}
476 486
477void ql_link_on(struct ql_adapter *qdev) 487void ql_link_on(struct ql_adapter *qdev)
478{ 488{
479 QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n", 489 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
480 qdev->ndev->name);
481 netif_carrier_on(qdev->ndev); 490 netif_carrier_on(qdev->ndev);
482 ql_set_mac_addr(qdev, 1); 491 ql_set_mac_addr(qdev, 1);
483} 492}
484 493
485void ql_link_off(struct ql_adapter *qdev) 494void ql_link_off(struct ql_adapter *qdev)
486{ 495{
487 QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n", 496 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
488 qdev->ndev->name);
489 netif_carrier_off(qdev->ndev); 497 netif_carrier_off(qdev->ndev);
490 ql_set_mac_addr(qdev, 0); 498 ql_set_mac_addr(qdev, 0);
491} 499}
@@ -522,27 +530,27 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
522 int status = -EINVAL; /* Return error if no mask match. */ 530 int status = -EINVAL; /* Return error if no mask match. */
523 u32 value = 0; 531 u32 value = 0;
524 532
525 QPRINTK(qdev, IFUP, DEBUG, 533 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
526 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n", 534 "%s %s mask %s the routing reg.\n",
527 (enable ? "Adding" : "Removing"), 535 enable ? "Adding" : "Removing",
528 ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""), 536 index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
529 ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""), 537 index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
530 ((index == 538 index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
531 RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""), 539 index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
532 ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""), 540 index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
533 ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""), 541 index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
534 ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""), 542 index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
535 ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""), 543 index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
536 ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""), 544 index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
537 ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""), 545 index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
538 ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""), 546 index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
539 ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""), 547 index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
540 ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""), 548 index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
541 ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""), 549 index == RT_IDX_UNUSED013 ? "UNUSED13" :
542 ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""), 550 index == RT_IDX_UNUSED014 ? "UNUSED14" :
543 ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""), 551 index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
544 ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""), 552 "(Bad index != RT_IDX)",
545 (enable ? "to" : "from")); 553 enable ? "to" : "from");
546 554
547 switch (mask) { 555 switch (mask) {
548 case RT_IDX_CAM_HIT: 556 case RT_IDX_CAM_HIT:
@@ -602,8 +610,8 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
602 break; 610 break;
603 } 611 }
604 default: 612 default:
605 QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n", 613 netif_err(qdev, ifup, qdev->ndev,
606 mask); 614 "Mask type %d not yet supported.\n", mask);
607 status = -EPERM; 615 status = -EPERM;
608 goto exit; 616 goto exit;
609 } 617 }
@@ -709,7 +717,7 @@ static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
709 717
710 status = strncmp((char *)&qdev->flash, str, 4); 718 status = strncmp((char *)&qdev->flash, str, 4);
711 if (status) { 719 if (status) {
712 QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n"); 720 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
713 return status; 721 return status;
714 } 722 }
715 723
@@ -717,8 +725,8 @@ static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
717 csum += le16_to_cpu(*flash++); 725 csum += le16_to_cpu(*flash++);
718 726
719 if (csum) 727 if (csum)
720 QPRINTK(qdev, IFUP, ERR, 728 netif_err(qdev, ifup, qdev->ndev,
721 "Invalid flash checksum, csum = 0x%.04x.\n", csum); 729 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
722 730
723 return csum; 731 return csum;
724} 732}
@@ -770,7 +778,8 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
770 for (i = 0; i < size; i++, p++) { 778 for (i = 0; i < size; i++, p++) {
771 status = ql_read_flash_word(qdev, i+offset, p); 779 status = ql_read_flash_word(qdev, i+offset, p);
772 if (status) { 780 if (status) {
773 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n"); 781 netif_err(qdev, ifup, qdev->ndev,
782 "Error reading flash.\n");
774 goto exit; 783 goto exit;
775 } 784 }
776 } 785 }
@@ -779,7 +788,7 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
779 sizeof(struct flash_params_8000) / sizeof(u16), 788 sizeof(struct flash_params_8000) / sizeof(u16),
780 "8000"); 789 "8000");
781 if (status) { 790 if (status) {
782 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n"); 791 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
783 status = -EINVAL; 792 status = -EINVAL;
784 goto exit; 793 goto exit;
785 } 794 }
@@ -797,7 +806,7 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
797 qdev->ndev->addr_len); 806 qdev->ndev->addr_len);
798 807
799 if (!is_valid_ether_addr(mac_addr)) { 808 if (!is_valid_ether_addr(mac_addr)) {
800 QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n"); 809 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
801 status = -EINVAL; 810 status = -EINVAL;
802 goto exit; 811 goto exit;
803 } 812 }
@@ -831,7 +840,8 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
831 for (i = 0; i < size; i++, p++) { 840 for (i = 0; i < size; i++, p++) {
832 status = ql_read_flash_word(qdev, i+offset, p); 841 status = ql_read_flash_word(qdev, i+offset, p);
833 if (status) { 842 if (status) {
834 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n"); 843 netif_err(qdev, ifup, qdev->ndev,
844 "Error reading flash.\n");
835 goto exit; 845 goto exit;
836 } 846 }
837 847
@@ -841,7 +851,7 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
841 sizeof(struct flash_params_8012) / sizeof(u16), 851 sizeof(struct flash_params_8012) / sizeof(u16),
842 "8012"); 852 "8012");
843 if (status) { 853 if (status) {
844 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n"); 854 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
845 status = -EINVAL; 855 status = -EINVAL;
846 goto exit; 856 goto exit;
847 } 857 }
@@ -959,17 +969,17 @@ static int ql_8012_port_initialize(struct ql_adapter *qdev)
959 /* Another function has the semaphore, so 969 /* Another function has the semaphore, so
960 * wait for the port init bit to come ready. 970 * wait for the port init bit to come ready.
961 */ 971 */
962 QPRINTK(qdev, LINK, INFO, 972 netif_info(qdev, link, qdev->ndev,
963 "Another function has the semaphore, so wait for the port init bit to come ready.\n"); 973 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
964 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0); 974 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
965 if (status) { 975 if (status) {
966 QPRINTK(qdev, LINK, CRIT, 976 netif_crit(qdev, link, qdev->ndev,
967 "Port initialize timed out.\n"); 977 "Port initialize timed out.\n");
968 } 978 }
969 return status; 979 return status;
970 } 980 }
971 981
972 QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n"); 982 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
973 /* Set the core reset. */ 983 /* Set the core reset. */
974 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data); 984 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
975 if (status) 985 if (status)
@@ -1099,8 +1109,8 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1099 GFP_ATOMIC, 1109 GFP_ATOMIC,
1100 qdev->lbq_buf_order); 1110 qdev->lbq_buf_order);
1101 if (unlikely(!rx_ring->pg_chunk.page)) { 1111 if (unlikely(!rx_ring->pg_chunk.page)) {
1102 QPRINTK(qdev, DRV, ERR, 1112 netif_err(qdev, drv, qdev->ndev,
1103 "page allocation failed.\n"); 1113 "page allocation failed.\n");
1104 return -ENOMEM; 1114 return -ENOMEM;
1105 } 1115 }
1106 rx_ring->pg_chunk.offset = 0; 1116 rx_ring->pg_chunk.offset = 0;
@@ -1110,8 +1120,8 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1110 if (pci_dma_mapping_error(qdev->pdev, map)) { 1120 if (pci_dma_mapping_error(qdev->pdev, map)) {
1111 __free_pages(rx_ring->pg_chunk.page, 1121 __free_pages(rx_ring->pg_chunk.page,
1112 qdev->lbq_buf_order); 1122 qdev->lbq_buf_order);
1113 QPRINTK(qdev, DRV, ERR, 1123 netif_err(qdev, drv, qdev->ndev,
1114 "PCI mapping failed.\n"); 1124 "PCI mapping failed.\n");
1115 return -ENOMEM; 1125 return -ENOMEM;
1116 } 1126 }
1117 rx_ring->pg_chunk.map = map; 1127 rx_ring->pg_chunk.map = map;
@@ -1148,15 +1158,15 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1148 1158
1149 while (rx_ring->lbq_free_cnt > 32) { 1159 while (rx_ring->lbq_free_cnt > 32) {
1150 for (i = 0; i < 16; i++) { 1160 for (i = 0; i < 16; i++) {
1151 QPRINTK(qdev, RX_STATUS, DEBUG, 1161 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1152 "lbq: try cleaning clean_idx = %d.\n", 1162 "lbq: try cleaning clean_idx = %d.\n",
1153 clean_idx); 1163 clean_idx);
1154 lbq_desc = &rx_ring->lbq[clean_idx]; 1164 lbq_desc = &rx_ring->lbq[clean_idx];
1155 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) { 1165 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1156 QPRINTK(qdev, IFUP, ERR, 1166 netif_err(qdev, ifup, qdev->ndev,
1157 "Could not get a page chunk.\n"); 1167 "Could not get a page chunk.\n");
1158 return; 1168 return;
1159 } 1169 }
1160 1170
1161 map = lbq_desc->p.pg_chunk.map + 1171 map = lbq_desc->p.pg_chunk.map +
1162 lbq_desc->p.pg_chunk.offset; 1172 lbq_desc->p.pg_chunk.offset;
@@ -1181,9 +1191,9 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1181 } 1191 }
1182 1192
1183 if (start_idx != clean_idx) { 1193 if (start_idx != clean_idx) {
1184 QPRINTK(qdev, RX_STATUS, DEBUG, 1194 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1185 "lbq: updating prod idx = %d.\n", 1195 "lbq: updating prod idx = %d.\n",
1186 rx_ring->lbq_prod_idx); 1196 rx_ring->lbq_prod_idx);
1187 ql_write_db_reg(rx_ring->lbq_prod_idx, 1197 ql_write_db_reg(rx_ring->lbq_prod_idx,
1188 rx_ring->lbq_prod_idx_db_reg); 1198 rx_ring->lbq_prod_idx_db_reg);
1189 } 1199 }
@@ -1201,19 +1211,20 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1201 while (rx_ring->sbq_free_cnt > 16) { 1211 while (rx_ring->sbq_free_cnt > 16) {
1202 for (i = 0; i < 16; i++) { 1212 for (i = 0; i < 16; i++) {
1203 sbq_desc = &rx_ring->sbq[clean_idx]; 1213 sbq_desc = &rx_ring->sbq[clean_idx];
1204 QPRINTK(qdev, RX_STATUS, DEBUG, 1214 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1205 "sbq: try cleaning clean_idx = %d.\n", 1215 "sbq: try cleaning clean_idx = %d.\n",
1206 clean_idx); 1216 clean_idx);
1207 if (sbq_desc->p.skb == NULL) { 1217 if (sbq_desc->p.skb == NULL) {
1208 QPRINTK(qdev, RX_STATUS, DEBUG, 1218 netif_printk(qdev, rx_status, KERN_DEBUG,
1209 "sbq: getting new skb for index %d.\n", 1219 qdev->ndev,
1210 sbq_desc->index); 1220 "sbq: getting new skb for index %d.\n",
1221 sbq_desc->index);
1211 sbq_desc->p.skb = 1222 sbq_desc->p.skb =
1212 netdev_alloc_skb(qdev->ndev, 1223 netdev_alloc_skb(qdev->ndev,
1213 SMALL_BUFFER_SIZE); 1224 SMALL_BUFFER_SIZE);
1214 if (sbq_desc->p.skb == NULL) { 1225 if (sbq_desc->p.skb == NULL) {
1215 QPRINTK(qdev, PROBE, ERR, 1226 netif_err(qdev, probe, qdev->ndev,
1216 "Couldn't get an skb.\n"); 1227 "Couldn't get an skb.\n");
1217 rx_ring->sbq_clean_idx = clean_idx; 1228 rx_ring->sbq_clean_idx = clean_idx;
1218 return; 1229 return;
1219 } 1230 }
@@ -1223,7 +1234,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1223 rx_ring->sbq_buf_size, 1234 rx_ring->sbq_buf_size,
1224 PCI_DMA_FROMDEVICE); 1235 PCI_DMA_FROMDEVICE);
1225 if (pci_dma_mapping_error(qdev->pdev, map)) { 1236 if (pci_dma_mapping_error(qdev->pdev, map)) {
1226 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n"); 1237 netif_err(qdev, ifup, qdev->ndev,
1238 "PCI mapping failed.\n");
1227 rx_ring->sbq_clean_idx = clean_idx; 1239 rx_ring->sbq_clean_idx = clean_idx;
1228 dev_kfree_skb_any(sbq_desc->p.skb); 1240 dev_kfree_skb_any(sbq_desc->p.skb);
1229 sbq_desc->p.skb = NULL; 1241 sbq_desc->p.skb = NULL;
@@ -1247,9 +1259,9 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1247 } 1259 }
1248 1260
1249 if (start_idx != clean_idx) { 1261 if (start_idx != clean_idx) {
1250 QPRINTK(qdev, RX_STATUS, DEBUG, 1262 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1251 "sbq: updating prod idx = %d.\n", 1263 "sbq: updating prod idx = %d.\n",
1252 rx_ring->sbq_prod_idx); 1264 rx_ring->sbq_prod_idx);
1253 ql_write_db_reg(rx_ring->sbq_prod_idx, 1265 ql_write_db_reg(rx_ring->sbq_prod_idx,
1254 rx_ring->sbq_prod_idx_db_reg); 1266 rx_ring->sbq_prod_idx_db_reg);
1255 } 1267 }
@@ -1281,8 +1293,9 @@ static void ql_unmap_send(struct ql_adapter *qdev,
1281 * then its an OAL. 1293 * then its an OAL.
1282 */ 1294 */
1283 if (i == 7) { 1295 if (i == 7) {
1284 QPRINTK(qdev, TX_DONE, DEBUG, 1296 netif_printk(qdev, tx_done, KERN_DEBUG,
1285 "unmapping OAL area.\n"); 1297 qdev->ndev,
1298 "unmapping OAL area.\n");
1286 } 1299 }
1287 pci_unmap_single(qdev->pdev, 1300 pci_unmap_single(qdev->pdev,
1288 pci_unmap_addr(&tx_ring_desc->map[i], 1301 pci_unmap_addr(&tx_ring_desc->map[i],
@@ -1291,8 +1304,8 @@ static void ql_unmap_send(struct ql_adapter *qdev,
1291 maplen), 1304 maplen),
1292 PCI_DMA_TODEVICE); 1305 PCI_DMA_TODEVICE);
1293 } else { 1306 } else {
1294 QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n", 1307 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1295 i); 1308 "unmapping frag %d.\n", i);
1296 pci_unmap_page(qdev->pdev, 1309 pci_unmap_page(qdev->pdev,
1297 pci_unmap_addr(&tx_ring_desc->map[i], 1310 pci_unmap_addr(&tx_ring_desc->map[i],
1298 mapaddr), 1311 mapaddr),
@@ -1317,7 +1330,8 @@ static int ql_map_send(struct ql_adapter *qdev,
1317 int frag_cnt = skb_shinfo(skb)->nr_frags; 1330 int frag_cnt = skb_shinfo(skb)->nr_frags;
1318 1331
1319 if (frag_cnt) { 1332 if (frag_cnt) {
1320 QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt); 1333 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1334 "frag_cnt = %d.\n", frag_cnt);
1321 } 1335 }
1322 /* 1336 /*
1323 * Map the skb buffer first. 1337 * Map the skb buffer first.
@@ -1326,8 +1340,8 @@ static int ql_map_send(struct ql_adapter *qdev,
1326 1340
1327 err = pci_dma_mapping_error(qdev->pdev, map); 1341 err = pci_dma_mapping_error(qdev->pdev, map);
1328 if (err) { 1342 if (err) {
1329 QPRINTK(qdev, TX_QUEUED, ERR, 1343 netif_err(qdev, tx_queued, qdev->ndev,
1330 "PCI mapping failed with error: %d\n", err); 1344 "PCI mapping failed with error: %d\n", err);
1331 1345
1332 return NETDEV_TX_BUSY; 1346 return NETDEV_TX_BUSY;
1333 } 1347 }
@@ -1373,9 +1387,9 @@ static int ql_map_send(struct ql_adapter *qdev,
1373 PCI_DMA_TODEVICE); 1387 PCI_DMA_TODEVICE);
1374 err = pci_dma_mapping_error(qdev->pdev, map); 1388 err = pci_dma_mapping_error(qdev->pdev, map);
1375 if (err) { 1389 if (err) {
1376 QPRINTK(qdev, TX_QUEUED, ERR, 1390 netif_err(qdev, tx_queued, qdev->ndev,
1377 "PCI mapping outbound address list with error: %d\n", 1391 "PCI mapping outbound address list with error: %d\n",
1378 err); 1392 err);
1379 goto map_error; 1393 goto map_error;
1380 } 1394 }
1381 1395
@@ -1403,9 +1417,9 @@ static int ql_map_send(struct ql_adapter *qdev,
1403 1417
1404 err = pci_dma_mapping_error(qdev->pdev, map); 1418 err = pci_dma_mapping_error(qdev->pdev, map);
1405 if (err) { 1419 if (err) {
1406 QPRINTK(qdev, TX_QUEUED, ERR, 1420 netif_err(qdev, tx_queued, qdev->ndev,
1407 "PCI mapping frags failed with error: %d.\n", 1421 "PCI mapping frags failed with error: %d.\n",
1408 err); 1422 err);
1409 goto map_error; 1423 goto map_error;
1410 } 1424 }
1411 1425
@@ -1433,6 +1447,260 @@ map_error:
1433 return NETDEV_TX_BUSY; 1447 return NETDEV_TX_BUSY;
1434} 1448}
1435 1449
1450/* Process an inbound completion from an rx ring. */
1451static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1452 struct rx_ring *rx_ring,
1453 struct ib_mac_iocb_rsp *ib_mac_rsp,
1454 u32 length,
1455 u16 vlan_id)
1456{
1457 struct sk_buff *skb;
1458 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1459 struct skb_frag_struct *rx_frag;
1460 int nr_frags;
1461 struct napi_struct *napi = &rx_ring->napi;
1462
1463 napi->dev = qdev->ndev;
1464
1465 skb = napi_get_frags(napi);
1466 if (!skb) {
1467 netif_err(qdev, drv, qdev->ndev,
1468 "Couldn't get an skb, exiting.\n");
1469 rx_ring->rx_dropped++;
1470 put_page(lbq_desc->p.pg_chunk.page);
1471 return;
1472 }
1473 prefetch(lbq_desc->p.pg_chunk.va);
1474 rx_frag = skb_shinfo(skb)->frags;
1475 nr_frags = skb_shinfo(skb)->nr_frags;
1476 rx_frag += nr_frags;
1477 rx_frag->page = lbq_desc->p.pg_chunk.page;
1478 rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1479 rx_frag->size = length;
1480
1481 skb->len += length;
1482 skb->data_len += length;
1483 skb->truesize += length;
1484 skb_shinfo(skb)->nr_frags++;
1485
1486 rx_ring->rx_packets++;
1487 rx_ring->rx_bytes += length;
1488 skb->ip_summed = CHECKSUM_UNNECESSARY;
1489 skb_record_rx_queue(skb, rx_ring->cq_id);
1490 if (qdev->vlgrp && (vlan_id != 0xffff))
1491 vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
1492 else
1493 napi_gro_frags(napi);
1494}
1495
1496/* Process an inbound completion from an rx ring. */
1497static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1498 struct rx_ring *rx_ring,
1499 struct ib_mac_iocb_rsp *ib_mac_rsp,
1500 u32 length,
1501 u16 vlan_id)
1502{
1503 struct net_device *ndev = qdev->ndev;
1504 struct sk_buff *skb = NULL;
1505 void *addr;
1506 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1507 struct napi_struct *napi = &rx_ring->napi;
1508
1509 skb = netdev_alloc_skb(ndev, length);
1510 if (!skb) {
1511 netif_err(qdev, drv, qdev->ndev,
1512 "Couldn't get an skb, need to unwind!.\n");
1513 rx_ring->rx_dropped++;
1514 put_page(lbq_desc->p.pg_chunk.page);
1515 return;
1516 }
1517
1518 addr = lbq_desc->p.pg_chunk.va;
1519 prefetch(addr);
1520
1521
1522 /* Frame error, so drop the packet. */
1523 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1524 netif_err(qdev, drv, qdev->ndev,
1525 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1526 rx_ring->rx_errors++;
1527 goto err_out;
1528 }
1529
1530 /* The max framesize filter on this chip is set higher than
1531 * MTU since FCoE uses 2k frames.
1532 */
1533 if (skb->len > ndev->mtu + ETH_HLEN) {
1534 netif_err(qdev, drv, qdev->ndev,
1535 "Segment too small, dropping.\n");
1536 rx_ring->rx_dropped++;
1537 goto err_out;
1538 }
1539 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1540 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1541 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1542 length);
1543 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1544 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1545 length-ETH_HLEN);
1546 skb->len += length-ETH_HLEN;
1547 skb->data_len += length-ETH_HLEN;
1548 skb->truesize += length-ETH_HLEN;
1549
1550 rx_ring->rx_packets++;
1551 rx_ring->rx_bytes += skb->len;
1552 skb->protocol = eth_type_trans(skb, ndev);
1553 skb->ip_summed = CHECKSUM_NONE;
1554
1555 if (qdev->rx_csum &&
1556 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1557 /* TCP frame. */
1558 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1559 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1560 "TCP checksum done!\n");
1561 skb->ip_summed = CHECKSUM_UNNECESSARY;
1562 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1563 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1564 /* Unfragmented ipv4 UDP frame. */
1565 struct iphdr *iph = (struct iphdr *) skb->data;
1566 if (!(iph->frag_off &
1567 cpu_to_be16(IP_MF|IP_OFFSET))) {
1568 skb->ip_summed = CHECKSUM_UNNECESSARY;
1569 netif_printk(qdev, rx_status, KERN_DEBUG,
1570 qdev->ndev,
1571 "TCP checksum done!\n");
1572 }
1573 }
1574 }
1575
1576 skb_record_rx_queue(skb, rx_ring->cq_id);
1577 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1578 if (qdev->vlgrp && (vlan_id != 0xffff))
1579 vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1580 else
1581 napi_gro_receive(napi, skb);
1582 } else {
1583 if (qdev->vlgrp && (vlan_id != 0xffff))
1584 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1585 else
1586 netif_receive_skb(skb);
1587 }
1588 return;
1589err_out:
1590 dev_kfree_skb_any(skb);
1591 put_page(lbq_desc->p.pg_chunk.page);
1592}
1593
1594/* Process an inbound completion from an rx ring. */
1595static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1596 struct rx_ring *rx_ring,
1597 struct ib_mac_iocb_rsp *ib_mac_rsp,
1598 u32 length,
1599 u16 vlan_id)
1600{
1601 struct net_device *ndev = qdev->ndev;
1602 struct sk_buff *skb = NULL;
1603 struct sk_buff *new_skb = NULL;
1604 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1605
1606 skb = sbq_desc->p.skb;
1607 /* Allocate new_skb and copy */
1608 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1609 if (new_skb == NULL) {
1610 netif_err(qdev, probe, qdev->ndev,
1611 "No skb available, drop the packet.\n");
1612 rx_ring->rx_dropped++;
1613 return;
1614 }
1615 skb_reserve(new_skb, NET_IP_ALIGN);
1616 memcpy(skb_put(new_skb, length), skb->data, length);
1617 skb = new_skb;
1618
1619 /* Frame error, so drop the packet. */
1620 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1621 netif_err(qdev, drv, qdev->ndev,
1622 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1623 dev_kfree_skb_any(skb);
1624 rx_ring->rx_errors++;
1625 return;
1626 }
1627
1628 /* loopback self test for ethtool */
1629 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1630 ql_check_lb_frame(qdev, skb);
1631 dev_kfree_skb_any(skb);
1632 return;
1633 }
1634
1635 /* The max framesize filter on this chip is set higher than
1636 * MTU since FCoE uses 2k frames.
1637 */
1638 if (skb->len > ndev->mtu + ETH_HLEN) {
1639 dev_kfree_skb_any(skb);
1640 rx_ring->rx_dropped++;
1641 return;
1642 }
1643
1644 prefetch(skb->data);
1645 skb->dev = ndev;
1646 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1647 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1648 "%s Multicast.\n",
1649 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1650 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1651 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1652 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1653 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1654 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1655 }
1656 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1657 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1658 "Promiscuous Packet.\n");
1659
1660 rx_ring->rx_packets++;
1661 rx_ring->rx_bytes += skb->len;
1662 skb->protocol = eth_type_trans(skb, ndev);
1663 skb->ip_summed = CHECKSUM_NONE;
1664
1665 /* If rx checksum is on, and there are no
1666 * csum or frame errors.
1667 */
1668 if (qdev->rx_csum &&
1669 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1670 /* TCP frame. */
1671 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1672 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1673 "TCP checksum done!\n");
1674 skb->ip_summed = CHECKSUM_UNNECESSARY;
1675 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1676 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1677 /* Unfragmented ipv4 UDP frame. */
1678 struct iphdr *iph = (struct iphdr *) skb->data;
1679 if (!(iph->frag_off &
1680 cpu_to_be16(IP_MF|IP_OFFSET))) {
1681 skb->ip_summed = CHECKSUM_UNNECESSARY;
1682 netif_printk(qdev, rx_status, KERN_DEBUG,
1683 qdev->ndev,
1684 "TCP checksum done!\n");
1685 }
1686 }
1687 }
1688
1689 skb_record_rx_queue(skb, rx_ring->cq_id);
1690 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1691 if (qdev->vlgrp && (vlan_id != 0xffff))
1692 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1693 vlan_id, skb);
1694 else
1695 napi_gro_receive(&rx_ring->napi, skb);
1696 } else {
1697 if (qdev->vlgrp && (vlan_id != 0xffff))
1698 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1699 else
1700 netif_receive_skb(skb);
1701 }
1702}
1703
1436static void ql_realign_skb(struct sk_buff *skb, int len) 1704static void ql_realign_skb(struct sk_buff *skb, int len)
1437{ 1705{
1438 void *temp_addr = skb->data; 1706 void *temp_addr = skb->data;
@@ -1467,7 +1735,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1467 */ 1735 */
1468 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV && 1736 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1469 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { 1737 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1470 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len); 1738 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1739 "Header of %d bytes in small buffer.\n", hdr_len);
1471 /* 1740 /*
1472 * Headers fit nicely into a small buffer. 1741 * Headers fit nicely into a small buffer.
1473 */ 1742 */
@@ -1486,15 +1755,16 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1486 * Handle the data buffer(s). 1755 * Handle the data buffer(s).
1487 */ 1756 */
1488 if (unlikely(!length)) { /* Is there data too? */ 1757 if (unlikely(!length)) { /* Is there data too? */
1489 QPRINTK(qdev, RX_STATUS, DEBUG, 1758 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1490 "No Data buffer in this packet.\n"); 1759 "No Data buffer in this packet.\n");
1491 return skb; 1760 return skb;
1492 } 1761 }
1493 1762
1494 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) { 1763 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1495 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { 1764 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1496 QPRINTK(qdev, RX_STATUS, DEBUG, 1765 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1497 "Headers in small, data of %d bytes in small, combine them.\n", length); 1766 "Headers in small, data of %d bytes in small, combine them.\n",
1767 length);
1498 /* 1768 /*
1499 * Data is less than small buffer size so it's 1769 * Data is less than small buffer size so it's
1500 * stuffed in a small buffer. 1770 * stuffed in a small buffer.
@@ -1520,8 +1790,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1520 maplen), 1790 maplen),
1521 PCI_DMA_FROMDEVICE); 1791 PCI_DMA_FROMDEVICE);
1522 } else { 1792 } else {
1523 QPRINTK(qdev, RX_STATUS, DEBUG, 1793 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1524 "%d bytes in a single small buffer.\n", length); 1794 "%d bytes in a single small buffer.\n",
1795 length);
1525 sbq_desc = ql_get_curr_sbuf(rx_ring); 1796 sbq_desc = ql_get_curr_sbuf(rx_ring);
1526 skb = sbq_desc->p.skb; 1797 skb = sbq_desc->p.skb;
1527 ql_realign_skb(skb, length); 1798 ql_realign_skb(skb, length);
@@ -1536,18 +1807,18 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1536 } 1807 }
1537 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) { 1808 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1538 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { 1809 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1539 QPRINTK(qdev, RX_STATUS, DEBUG, 1810 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1540 "Header in small, %d bytes in large. Chain large to small!\n", length); 1811 "Header in small, %d bytes in large. Chain large to small!\n",
1812 length);
1541 /* 1813 /*
1542 * The data is in a single large buffer. We 1814 * The data is in a single large buffer. We
1543 * chain it to the header buffer's skb and let 1815 * chain it to the header buffer's skb and let
1544 * it rip. 1816 * it rip.
1545 */ 1817 */
1546 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); 1818 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1547 QPRINTK(qdev, RX_STATUS, DEBUG, 1819 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1548 "Chaining page at offset = %d," 1820 "Chaining page at offset = %d, for %d bytes to skb.\n",
1549 "for %d bytes to skb.\n", 1821 lbq_desc->p.pg_chunk.offset, length);
1550 lbq_desc->p.pg_chunk.offset, length);
1551 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, 1822 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1552 lbq_desc->p.pg_chunk.offset, 1823 lbq_desc->p.pg_chunk.offset,
1553 length); 1824 length);
@@ -1563,8 +1834,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1563 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); 1834 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1564 skb = netdev_alloc_skb(qdev->ndev, length); 1835 skb = netdev_alloc_skb(qdev->ndev, length);
1565 if (skb == NULL) { 1836 if (skb == NULL) {
1566 QPRINTK(qdev, PROBE, DEBUG, 1837 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1567 "No skb available, drop the packet.\n"); 1838 "No skb available, drop the packet.\n");
1568 return NULL; 1839 return NULL;
1569 } 1840 }
1570 pci_unmap_page(qdev->pdev, 1841 pci_unmap_page(qdev->pdev,
@@ -1573,8 +1844,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1573 pci_unmap_len(lbq_desc, maplen), 1844 pci_unmap_len(lbq_desc, maplen),
1574 PCI_DMA_FROMDEVICE); 1845 PCI_DMA_FROMDEVICE);
1575 skb_reserve(skb, NET_IP_ALIGN); 1846 skb_reserve(skb, NET_IP_ALIGN);
1576 QPRINTK(qdev, RX_STATUS, DEBUG, 1847 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1577 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length); 1848 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1849 length);
1578 skb_fill_page_desc(skb, 0, 1850 skb_fill_page_desc(skb, 0,
1579 lbq_desc->p.pg_chunk.page, 1851 lbq_desc->p.pg_chunk.page,
1580 lbq_desc->p.pg_chunk.offset, 1852 lbq_desc->p.pg_chunk.offset,
@@ -1615,8 +1887,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1615 * a local buffer and use it to find the 1887 * a local buffer and use it to find the
1616 * pages to chain. 1888 * pages to chain.
1617 */ 1889 */
1618 QPRINTK(qdev, RX_STATUS, DEBUG, 1890 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1619 "%d bytes of headers & data in chain of large.\n", length); 1891 "%d bytes of headers & data in chain of large.\n",
1892 length);
1620 skb = sbq_desc->p.skb; 1893 skb = sbq_desc->p.skb;
1621 sbq_desc->p.skb = NULL; 1894 sbq_desc->p.skb = NULL;
1622 skb_reserve(skb, NET_IP_ALIGN); 1895 skb_reserve(skb, NET_IP_ALIGN);
@@ -1626,9 +1899,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1626 size = (length < rx_ring->lbq_buf_size) ? length : 1899 size = (length < rx_ring->lbq_buf_size) ? length :
1627 rx_ring->lbq_buf_size; 1900 rx_ring->lbq_buf_size;
1628 1901
1629 QPRINTK(qdev, RX_STATUS, DEBUG, 1902 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1630 "Adding page %d to skb for %d bytes.\n", 1903 "Adding page %d to skb for %d bytes.\n",
1631 i, size); 1904 i, size);
1632 skb_fill_page_desc(skb, i, 1905 skb_fill_page_desc(skb, i,
1633 lbq_desc->p.pg_chunk.page, 1906 lbq_desc->p.pg_chunk.page,
1634 lbq_desc->p.pg_chunk.offset, 1907 lbq_desc->p.pg_chunk.offset,
@@ -1646,29 +1919,28 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1646} 1919}
1647 1920
1648/* Process an inbound completion from an rx ring. */ 1921/* Process an inbound completion from an rx ring. */
1649static void ql_process_mac_rx_intr(struct ql_adapter *qdev, 1922static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1650 struct rx_ring *rx_ring, 1923 struct rx_ring *rx_ring,
1651 struct ib_mac_iocb_rsp *ib_mac_rsp) 1924 struct ib_mac_iocb_rsp *ib_mac_rsp,
1925 u16 vlan_id)
1652{ 1926{
1653 struct net_device *ndev = qdev->ndev; 1927 struct net_device *ndev = qdev->ndev;
1654 struct sk_buff *skb = NULL; 1928 struct sk_buff *skb = NULL;
1655 u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
1656 IB_MAC_IOCB_RSP_VLAN_MASK)
1657 1929
1658 QL_DUMP_IB_MAC_RSP(ib_mac_rsp); 1930 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1659 1931
1660 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp); 1932 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1661 if (unlikely(!skb)) { 1933 if (unlikely(!skb)) {
1662 QPRINTK(qdev, RX_STATUS, DEBUG, 1934 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1663 "No skb available, drop packet.\n"); 1935 "No skb available, drop packet.\n");
1664 rx_ring->rx_dropped++; 1936 rx_ring->rx_dropped++;
1665 return; 1937 return;
1666 } 1938 }
1667 1939
1668 /* Frame error, so drop the packet. */ 1940 /* Frame error, so drop the packet. */
1669 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { 1941 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1670 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n", 1942 netif_err(qdev, drv, qdev->ndev,
1671 ib_mac_rsp->flags2); 1943 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1672 dev_kfree_skb_any(skb); 1944 dev_kfree_skb_any(skb);
1673 rx_ring->rx_errors++; 1945 rx_ring->rx_errors++;
1674 return; 1946 return;
@@ -1693,17 +1965,18 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1693 prefetch(skb->data); 1965 prefetch(skb->data);
1694 skb->dev = ndev; 1966 skb->dev = ndev;
1695 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { 1967 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1696 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n", 1968 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1697 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == 1969 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1698 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "", 1970 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1699 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == 1971 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1700 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "", 1972 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1701 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == 1973 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1702 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); 1974 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1703 rx_ring->rx_multicast++; 1975 rx_ring->rx_multicast++;
1704 } 1976 }
1705 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) { 1977 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1706 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n"); 1978 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1979 "Promiscuous Packet.\n");
1707 } 1980 }
1708 1981
1709 skb->protocol = eth_type_trans(skb, ndev); 1982 skb->protocol = eth_type_trans(skb, ndev);
@@ -1716,8 +1989,8 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1716 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { 1989 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1717 /* TCP frame. */ 1990 /* TCP frame. */
1718 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { 1991 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1719 QPRINTK(qdev, RX_STATUS, DEBUG, 1992 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1720 "TCP checksum done!\n"); 1993 "TCP checksum done!\n");
1721 skb->ip_summed = CHECKSUM_UNNECESSARY; 1994 skb->ip_summed = CHECKSUM_UNNECESSARY;
1722 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && 1995 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1723 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { 1996 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
@@ -1726,8 +1999,8 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1726 if (!(iph->frag_off & 1999 if (!(iph->frag_off &
1727 cpu_to_be16(IP_MF|IP_OFFSET))) { 2000 cpu_to_be16(IP_MF|IP_OFFSET))) {
1728 skb->ip_summed = CHECKSUM_UNNECESSARY; 2001 skb->ip_summed = CHECKSUM_UNNECESSARY;
1729 QPRINTK(qdev, RX_STATUS, DEBUG, 2002 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1730 "TCP checksum done!\n"); 2003 "TCP checksum done!\n");
1731 } 2004 }
1732 } 2005 }
1733 } 2006 }
@@ -1753,6 +2026,56 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1753 } 2026 }
1754} 2027}
1755 2028
2029/* Process an inbound completion from an rx ring. */
2030static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2031 struct rx_ring *rx_ring,
2032 struct ib_mac_iocb_rsp *ib_mac_rsp)
2033{
2034 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2035 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2036 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2037 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2038
2039 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2040
2041 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2042 /* The data and headers are split into
2043 * separate buffers.
2044 */
2045 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2046 vlan_id);
2047 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2048 /* The data fit in a single small buffer.
2049 * Allocate a new skb, copy the data and
2050 * return the buffer to the free pool.
2051 */
2052 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2053 length, vlan_id);
2054 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2055 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2056 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2057 /* TCP packet in a page chunk that's been checksummed.
2058 * Tack it on to our GRO skb and let it go.
2059 */
2060 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2061 length, vlan_id);
2062 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2063 /* Non-TCP packet in a page chunk. Allocate an
2064 * skb, tack it on frags, and send it up.
2065 */
2066 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2067 length, vlan_id);
2068 } else {
2069 /* Non-TCP/UDP large frames that span multiple buffers
2070 * can be processed corrrectly by the split frame logic.
2071 */
2072 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2073 vlan_id);
2074 }
2075
2076 return (unsigned long)length;
2077}
2078
1756/* Process an outbound completion from an rx ring. */ 2079/* Process an outbound completion from an rx ring. */
1757static void ql_process_mac_tx_intr(struct ql_adapter *qdev, 2080static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1758 struct ob_mac_iocb_rsp *mac_rsp) 2081 struct ob_mac_iocb_rsp *mac_rsp)
@@ -1774,20 +2097,20 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1774 OB_MAC_IOCB_RSP_L | 2097 OB_MAC_IOCB_RSP_L |
1775 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) { 2098 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
1776 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) { 2099 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
1777 QPRINTK(qdev, TX_DONE, WARNING, 2100 netif_warn(qdev, tx_done, qdev->ndev,
1778 "Total descriptor length did not match transfer length.\n"); 2101 "Total descriptor length did not match transfer length.\n");
1779 } 2102 }
1780 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) { 2103 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
1781 QPRINTK(qdev, TX_DONE, WARNING, 2104 netif_warn(qdev, tx_done, qdev->ndev,
1782 "Frame too short to be legal, not sent.\n"); 2105 "Frame too short to be valid, not sent.\n");
1783 } 2106 }
1784 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) { 2107 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
1785 QPRINTK(qdev, TX_DONE, WARNING, 2108 netif_warn(qdev, tx_done, qdev->ndev,
1786 "Frame too long, but sent anyway.\n"); 2109 "Frame too long, but sent anyway.\n");
1787 } 2110 }
1788 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) { 2111 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
1789 QPRINTK(qdev, TX_DONE, WARNING, 2112 netif_warn(qdev, tx_done, qdev->ndev,
1790 "PCI backplane error. Frame not sent.\n"); 2113 "PCI backplane error. Frame not sent.\n");
1791 } 2114 }
1792 } 2115 }
1793 atomic_inc(&tx_ring->tx_count); 2116 atomic_inc(&tx_ring->tx_count);
@@ -1817,33 +2140,35 @@ static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1817{ 2140{
1818 switch (ib_ae_rsp->event) { 2141 switch (ib_ae_rsp->event) {
1819 case MGMT_ERR_EVENT: 2142 case MGMT_ERR_EVENT:
1820 QPRINTK(qdev, RX_ERR, ERR, 2143 netif_err(qdev, rx_err, qdev->ndev,
1821 "Management Processor Fatal Error.\n"); 2144 "Management Processor Fatal Error.\n");
1822 ql_queue_fw_error(qdev); 2145 ql_queue_fw_error(qdev);
1823 return; 2146 return;
1824 2147
1825 case CAM_LOOKUP_ERR_EVENT: 2148 case CAM_LOOKUP_ERR_EVENT:
1826 QPRINTK(qdev, LINK, ERR, 2149 netif_err(qdev, link, qdev->ndev,
1827 "Multiple CAM hits lookup occurred.\n"); 2150 "Multiple CAM hits lookup occurred.\n");
1828 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n"); 2151 netif_err(qdev, drv, qdev->ndev,
2152 "This event shouldn't occur.\n");
1829 ql_queue_asic_error(qdev); 2153 ql_queue_asic_error(qdev);
1830 return; 2154 return;
1831 2155
1832 case SOFT_ECC_ERROR_EVENT: 2156 case SOFT_ECC_ERROR_EVENT:
1833 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n"); 2157 netif_err(qdev, rx_err, qdev->ndev,
2158 "Soft ECC error detected.\n");
1834 ql_queue_asic_error(qdev); 2159 ql_queue_asic_error(qdev);
1835 break; 2160 break;
1836 2161
1837 case PCI_ERR_ANON_BUF_RD: 2162 case PCI_ERR_ANON_BUF_RD:
1838 QPRINTK(qdev, RX_ERR, ERR, 2163 netif_err(qdev, rx_err, qdev->ndev,
1839 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n", 2164 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1840 ib_ae_rsp->q_id); 2165 ib_ae_rsp->q_id);
1841 ql_queue_asic_error(qdev); 2166 ql_queue_asic_error(qdev);
1842 break; 2167 break;
1843 2168
1844 default: 2169 default:
1845 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n", 2170 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
1846 ib_ae_rsp->event); 2171 ib_ae_rsp->event);
1847 ql_queue_asic_error(qdev); 2172 ql_queue_asic_error(qdev);
1848 break; 2173 break;
1849 } 2174 }
@@ -1860,9 +2185,9 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1860 /* While there are entries in the completion queue. */ 2185 /* While there are entries in the completion queue. */
1861 while (prod != rx_ring->cnsmr_idx) { 2186 while (prod != rx_ring->cnsmr_idx) {
1862 2187
1863 QPRINTK(qdev, RX_STATUS, DEBUG, 2188 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1864 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id, 2189 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
1865 prod, rx_ring->cnsmr_idx); 2190 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
1866 2191
1867 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry; 2192 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
1868 rmb(); 2193 rmb();
@@ -1873,9 +2198,9 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1873 ql_process_mac_tx_intr(qdev, net_rsp); 2198 ql_process_mac_tx_intr(qdev, net_rsp);
1874 break; 2199 break;
1875 default: 2200 default:
1876 QPRINTK(qdev, RX_STATUS, DEBUG, 2201 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1877 "Hit default case, not handled! dropping the packet, opcode = %x.\n", 2202 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1878 net_rsp->opcode); 2203 net_rsp->opcode);
1879 } 2204 }
1880 count++; 2205 count++;
1881 ql_update_cq(rx_ring); 2206 ql_update_cq(rx_ring);
@@ -1907,9 +2232,9 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1907 /* While there are entries in the completion queue. */ 2232 /* While there are entries in the completion queue. */
1908 while (prod != rx_ring->cnsmr_idx) { 2233 while (prod != rx_ring->cnsmr_idx) {
1909 2234
1910 QPRINTK(qdev, RX_STATUS, DEBUG, 2235 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1911 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id, 2236 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
1912 prod, rx_ring->cnsmr_idx); 2237 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
1913 2238
1914 net_rsp = rx_ring->curr_entry; 2239 net_rsp = rx_ring->curr_entry;
1915 rmb(); 2240 rmb();
@@ -1925,11 +2250,10 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1925 net_rsp); 2250 net_rsp);
1926 break; 2251 break;
1927 default: 2252 default:
1928 { 2253 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1929 QPRINTK(qdev, RX_STATUS, DEBUG, 2254 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1930 "Hit default case, not handled! dropping the packet, opcode = %x.\n", 2255 net_rsp->opcode);
1931 net_rsp->opcode); 2256 break;
1932 }
1933 } 2257 }
1934 count++; 2258 count++;
1935 ql_update_cq(rx_ring); 2259 ql_update_cq(rx_ring);
@@ -1950,8 +2274,8 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1950 int i, work_done = 0; 2274 int i, work_done = 0;
1951 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id]; 2275 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
1952 2276
1953 QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n", 2277 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1954 rx_ring->cq_id); 2278 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
1955 2279
1956 /* Service the TX rings first. They start 2280 /* Service the TX rings first. They start
1957 * right after the RSS rings. */ 2281 * right after the RSS rings. */
@@ -1963,9 +2287,9 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1963 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) && 2287 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
1964 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) != 2288 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
1965 trx_ring->cnsmr_idx)) { 2289 trx_ring->cnsmr_idx)) {
1966 QPRINTK(qdev, INTR, DEBUG, 2290 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
1967 "%s: Servicing TX completion ring %d.\n", 2291 "%s: Servicing TX completion ring %d.\n",
1968 __func__, trx_ring->cq_id); 2292 __func__, trx_ring->cq_id);
1969 ql_clean_outbound_rx_ring(trx_ring); 2293 ql_clean_outbound_rx_ring(trx_ring);
1970 } 2294 }
1971 } 2295 }
@@ -1975,9 +2299,9 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1975 */ 2299 */
1976 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != 2300 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
1977 rx_ring->cnsmr_idx) { 2301 rx_ring->cnsmr_idx) {
1978 QPRINTK(qdev, INTR, DEBUG, 2302 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
1979 "%s: Servicing RX completion ring %d.\n", 2303 "%s: Servicing RX completion ring %d.\n",
1980 __func__, rx_ring->cq_id); 2304 __func__, rx_ring->cq_id);
1981 work_done = ql_clean_inbound_rx_ring(rx_ring, budget); 2305 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
1982 } 2306 }
1983 2307
@@ -1994,12 +2318,13 @@ static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *gr
1994 2318
1995 qdev->vlgrp = grp; 2319 qdev->vlgrp = grp;
1996 if (grp) { 2320 if (grp) {
1997 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n"); 2321 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2322 "Turning on VLAN in NIC_RCV_CFG.\n");
1998 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK | 2323 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
1999 NIC_RCV_CFG_VLAN_MATCH_AND_NON); 2324 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2000 } else { 2325 } else {
2001 QPRINTK(qdev, IFUP, DEBUG, 2326 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2002 "Turning off VLAN in NIC_RCV_CFG.\n"); 2327 "Turning off VLAN in NIC_RCV_CFG.\n");
2003 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK); 2328 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2004 } 2329 }
2005} 2330}
@@ -2015,7 +2340,8 @@ static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2015 return; 2340 return;
2016 if (ql_set_mac_addr_reg 2341 if (ql_set_mac_addr_reg
2017 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { 2342 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2018 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n"); 2343 netif_err(qdev, ifup, qdev->ndev,
2344 "Failed to init vlan address.\n");
2019 } 2345 }
2020 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 2346 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2021} 2347}
@@ -2032,7 +2358,8 @@ static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2032 2358
2033 if (ql_set_mac_addr_reg 2359 if (ql_set_mac_addr_reg
2034 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { 2360 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2035 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n"); 2361 netif_err(qdev, ifup, qdev->ndev,
2362 "Failed to clear vlan address.\n");
2036 } 2363 }
2037 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 2364 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2038 2365
@@ -2061,7 +2388,8 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
2061 2388
2062 spin_lock(&qdev->hw_lock); 2389 spin_lock(&qdev->hw_lock);
2063 if (atomic_read(&qdev->intr_context[0].irq_cnt)) { 2390 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2064 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n"); 2391 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2392 "Shared Interrupt, Not ours!\n");
2065 spin_unlock(&qdev->hw_lock); 2393 spin_unlock(&qdev->hw_lock);
2066 return IRQ_NONE; 2394 return IRQ_NONE;
2067 } 2395 }
@@ -2074,10 +2402,11 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
2074 */ 2402 */
2075 if (var & STS_FE) { 2403 if (var & STS_FE) {
2076 ql_queue_asic_error(qdev); 2404 ql_queue_asic_error(qdev);
2077 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var); 2405 netif_err(qdev, intr, qdev->ndev,
2406 "Got fatal error, STS = %x.\n", var);
2078 var = ql_read32(qdev, ERR_STS); 2407 var = ql_read32(qdev, ERR_STS);
2079 QPRINTK(qdev, INTR, ERR, 2408 netif_err(qdev, intr, qdev->ndev,
2080 "Resetting chip. Error Status Register = 0x%x\n", var); 2409 "Resetting chip. Error Status Register = 0x%x\n", var);
2081 return IRQ_HANDLED; 2410 return IRQ_HANDLED;
2082 } 2411 }
2083 2412
@@ -2090,7 +2419,8 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
2090 * We've got an async event or mailbox completion. 2419 * We've got an async event or mailbox completion.
2091 * Handle it and clear the source of the interrupt. 2420 * Handle it and clear the source of the interrupt.
2092 */ 2421 */
2093 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n"); 2422 netif_err(qdev, intr, qdev->ndev,
2423 "Got MPI processor interrupt.\n");
2094 ql_disable_completion_interrupt(qdev, intr_context->intr); 2424 ql_disable_completion_interrupt(qdev, intr_context->intr);
2095 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); 2425 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2096 queue_delayed_work_on(smp_processor_id(), 2426 queue_delayed_work_on(smp_processor_id(),
@@ -2105,8 +2435,8 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
2105 */ 2435 */
2106 var = ql_read32(qdev, ISR1); 2436 var = ql_read32(qdev, ISR1);
2107 if (var & intr_context->irq_mask) { 2437 if (var & intr_context->irq_mask) {
2108 QPRINTK(qdev, INTR, INFO, 2438 netif_info(qdev, intr, qdev->ndev,
2109 "Waking handler for rx_ring[0].\n"); 2439 "Waking handler for rx_ring[0].\n");
2110 ql_disable_completion_interrupt(qdev, intr_context->intr); 2440 ql_disable_completion_interrupt(qdev, intr_context->intr);
2111 napi_schedule(&rx_ring->napi); 2441 napi_schedule(&rx_ring->napi);
2112 work_done++; 2442 work_done++;
@@ -2203,9 +2533,9 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2203 return NETDEV_TX_OK; 2533 return NETDEV_TX_OK;
2204 2534
2205 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { 2535 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2206 QPRINTK(qdev, TX_QUEUED, INFO, 2536 netif_info(qdev, tx_queued, qdev->ndev,
2207 "%s: shutting down tx queue %d du to lack of resources.\n", 2537 "%s: shutting down tx queue %d du to lack of resources.\n",
2208 __func__, tx_ring_idx); 2538 __func__, tx_ring_idx);
2209 netif_stop_subqueue(ndev, tx_ring->wq_id); 2539 netif_stop_subqueue(ndev, tx_ring->wq_id);
2210 atomic_inc(&tx_ring->queue_stopped); 2540 atomic_inc(&tx_ring->queue_stopped);
2211 tx_ring->tx_errors++; 2541 tx_ring->tx_errors++;
@@ -2226,8 +2556,8 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2226 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len); 2556 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2227 2557
2228 if (qdev->vlgrp && vlan_tx_tag_present(skb)) { 2558 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2229 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n", 2559 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2230 vlan_tx_tag_get(skb)); 2560 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2231 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V; 2561 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2232 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb)); 2562 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2233 } 2563 }
@@ -2241,8 +2571,8 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2241 } 2571 }
2242 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != 2572 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2243 NETDEV_TX_OK) { 2573 NETDEV_TX_OK) {
2244 QPRINTK(qdev, TX_QUEUED, ERR, 2574 netif_err(qdev, tx_queued, qdev->ndev,
2245 "Could not map the segments.\n"); 2575 "Could not map the segments.\n");
2246 tx_ring->tx_errors++; 2576 tx_ring->tx_errors++;
2247 return NETDEV_TX_BUSY; 2577 return NETDEV_TX_BUSY;
2248 } 2578 }
@@ -2253,8 +2583,9 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2253 wmb(); 2583 wmb();
2254 2584
2255 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); 2585 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2256 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n", 2586 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2257 tx_ring->prod_idx, skb->len); 2587 "tx queued, slot %d, len %d\n",
2588 tx_ring->prod_idx, skb->len);
2258 2589
2259 atomic_dec(&tx_ring->tx_count); 2590 atomic_dec(&tx_ring->tx_count);
2260 return NETDEV_TX_OK; 2591 return NETDEV_TX_OK;
@@ -2285,8 +2616,8 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2285 pci_alloc_consistent(qdev->pdev, 2616 pci_alloc_consistent(qdev->pdev,
2286 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma); 2617 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2287 if (qdev->rx_ring_shadow_reg_area == NULL) { 2618 if (qdev->rx_ring_shadow_reg_area == NULL) {
2288 QPRINTK(qdev, IFUP, ERR, 2619 netif_err(qdev, ifup, qdev->ndev,
2289 "Allocation of RX shadow space failed.\n"); 2620 "Allocation of RX shadow space failed.\n");
2290 return -ENOMEM; 2621 return -ENOMEM;
2291 } 2622 }
2292 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE); 2623 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
@@ -2294,8 +2625,8 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2294 pci_alloc_consistent(qdev->pdev, PAGE_SIZE, 2625 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2295 &qdev->tx_ring_shadow_reg_dma); 2626 &qdev->tx_ring_shadow_reg_dma);
2296 if (qdev->tx_ring_shadow_reg_area == NULL) { 2627 if (qdev->tx_ring_shadow_reg_area == NULL) {
2297 QPRINTK(qdev, IFUP, ERR, 2628 netif_err(qdev, ifup, qdev->ndev,
2298 "Allocation of TX shadow space failed.\n"); 2629 "Allocation of TX shadow space failed.\n");
2299 goto err_wqp_sh_area; 2630 goto err_wqp_sh_area;
2300 } 2631 }
2301 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE); 2632 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
@@ -2349,7 +2680,7 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2349 2680
2350 if ((tx_ring->wq_base == NULL) || 2681 if ((tx_ring->wq_base == NULL) ||
2351 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) { 2682 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2352 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n"); 2683 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2353 return -ENOMEM; 2684 return -ENOMEM;
2354 } 2685 }
2355 tx_ring->q = 2686 tx_ring->q =
@@ -2400,7 +2731,8 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
2400 for (i = 0; i < rx_ring->sbq_len; i++) { 2731 for (i = 0; i < rx_ring->sbq_len; i++) {
2401 sbq_desc = &rx_ring->sbq[i]; 2732 sbq_desc = &rx_ring->sbq[i];
2402 if (sbq_desc == NULL) { 2733 if (sbq_desc == NULL) {
2403 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i); 2734 netif_err(qdev, ifup, qdev->ndev,
2735 "sbq_desc %d is NULL.\n", i);
2404 return; 2736 return;
2405 } 2737 }
2406 if (sbq_desc->p.skb) { 2738 if (sbq_desc->p.skb) {
@@ -2527,7 +2859,7 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2527 &rx_ring->cq_base_dma); 2859 &rx_ring->cq_base_dma);
2528 2860
2529 if (rx_ring->cq_base == NULL) { 2861 if (rx_ring->cq_base == NULL) {
2530 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n"); 2862 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2531 return -ENOMEM; 2863 return -ENOMEM;
2532 } 2864 }
2533 2865
@@ -2540,8 +2872,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2540 &rx_ring->sbq_base_dma); 2872 &rx_ring->sbq_base_dma);
2541 2873
2542 if (rx_ring->sbq_base == NULL) { 2874 if (rx_ring->sbq_base == NULL) {
2543 QPRINTK(qdev, IFUP, ERR, 2875 netif_err(qdev, ifup, qdev->ndev,
2544 "Small buffer queue allocation failed.\n"); 2876 "Small buffer queue allocation failed.\n");
2545 goto err_mem; 2877 goto err_mem;
2546 } 2878 }
2547 2879
@@ -2552,8 +2884,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2552 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc), 2884 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2553 GFP_KERNEL); 2885 GFP_KERNEL);
2554 if (rx_ring->sbq == NULL) { 2886 if (rx_ring->sbq == NULL) {
2555 QPRINTK(qdev, IFUP, ERR, 2887 netif_err(qdev, ifup, qdev->ndev,
2556 "Small buffer queue control block allocation failed.\n"); 2888 "Small buffer queue control block allocation failed.\n");
2557 goto err_mem; 2889 goto err_mem;
2558 } 2890 }
2559 2891
@@ -2569,8 +2901,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2569 &rx_ring->lbq_base_dma); 2901 &rx_ring->lbq_base_dma);
2570 2902
2571 if (rx_ring->lbq_base == NULL) { 2903 if (rx_ring->lbq_base == NULL) {
2572 QPRINTK(qdev, IFUP, ERR, 2904 netif_err(qdev, ifup, qdev->ndev,
2573 "Large buffer queue allocation failed.\n"); 2905 "Large buffer queue allocation failed.\n");
2574 goto err_mem; 2906 goto err_mem;
2575 } 2907 }
2576 /* 2908 /*
@@ -2580,8 +2912,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2580 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc), 2912 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2581 GFP_KERNEL); 2913 GFP_KERNEL);
2582 if (rx_ring->lbq == NULL) { 2914 if (rx_ring->lbq == NULL) {
2583 QPRINTK(qdev, IFUP, ERR, 2915 netif_err(qdev, ifup, qdev->ndev,
2584 "Large buffer queue control block allocation failed.\n"); 2916 "Large buffer queue control block allocation failed.\n");
2585 goto err_mem; 2917 goto err_mem;
2586 } 2918 }
2587 2919
@@ -2610,10 +2942,10 @@ static void ql_tx_ring_clean(struct ql_adapter *qdev)
2610 for (i = 0; i < tx_ring->wq_len; i++) { 2942 for (i = 0; i < tx_ring->wq_len; i++) {
2611 tx_ring_desc = &tx_ring->q[i]; 2943 tx_ring_desc = &tx_ring->q[i];
2612 if (tx_ring_desc && tx_ring_desc->skb) { 2944 if (tx_ring_desc && tx_ring_desc->skb) {
2613 QPRINTK(qdev, IFDOWN, ERR, 2945 netif_err(qdev, ifdown, qdev->ndev,
2614 "Freeing lost SKB %p, from queue %d, index %d.\n", 2946 "Freeing lost SKB %p, from queue %d, index %d.\n",
2615 tx_ring_desc->skb, j, 2947 tx_ring_desc->skb, j,
2616 tx_ring_desc->index); 2948 tx_ring_desc->index);
2617 ql_unmap_send(qdev, tx_ring_desc, 2949 ql_unmap_send(qdev, tx_ring_desc,
2618 tx_ring_desc->map_cnt); 2950 tx_ring_desc->map_cnt);
2619 dev_kfree_skb(tx_ring_desc->skb); 2951 dev_kfree_skb(tx_ring_desc->skb);
@@ -2644,16 +2976,16 @@ static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2644 2976
2645 for (i = 0; i < qdev->rx_ring_count; i++) { 2977 for (i = 0; i < qdev->rx_ring_count; i++) {
2646 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) { 2978 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2647 QPRINTK(qdev, IFUP, ERR, 2979 netif_err(qdev, ifup, qdev->ndev,
2648 "RX resource allocation failed.\n"); 2980 "RX resource allocation failed.\n");
2649 goto err_mem; 2981 goto err_mem;
2650 } 2982 }
2651 } 2983 }
2652 /* Allocate tx queue resources */ 2984 /* Allocate tx queue resources */
2653 for (i = 0; i < qdev->tx_ring_count; i++) { 2985 for (i = 0; i < qdev->tx_ring_count; i++) {
2654 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) { 2986 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2655 QPRINTK(qdev, IFUP, ERR, 2987 netif_err(qdev, ifup, qdev->ndev,
2656 "TX resource allocation failed.\n"); 2988 "TX resource allocation failed.\n");
2657 goto err_mem; 2989 goto err_mem;
2658 } 2990 }
2659 } 2991 }
@@ -2788,14 +3120,15 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2788 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames); 3120 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
2789 break; 3121 break;
2790 default: 3122 default:
2791 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n", 3123 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2792 rx_ring->type); 3124 "Invalid rx_ring->type = %d.\n", rx_ring->type);
2793 } 3125 }
2794 QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n"); 3126 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3127 "Initializing rx work queue.\n");
2795 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), 3128 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2796 CFG_LCQ, rx_ring->cq_id); 3129 CFG_LCQ, rx_ring->cq_id);
2797 if (err) { 3130 if (err) {
2798 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n"); 3131 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
2799 return err; 3132 return err;
2800 } 3133 }
2801 return err; 3134 return err;
@@ -2841,10 +3174,11 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2841 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ, 3174 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
2842 (u16) tx_ring->wq_id); 3175 (u16) tx_ring->wq_id);
2843 if (err) { 3176 if (err) {
2844 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n"); 3177 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
2845 return err; 3178 return err;
2846 } 3179 }
2847 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n"); 3180 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3181 "Successfully loaded WQICB.\n");
2848 return err; 3182 return err;
2849} 3183}
2850 3184
@@ -2898,15 +3232,15 @@ static void ql_enable_msix(struct ql_adapter *qdev)
2898 if (err < 0) { 3232 if (err < 0) {
2899 kfree(qdev->msi_x_entry); 3233 kfree(qdev->msi_x_entry);
2900 qdev->msi_x_entry = NULL; 3234 qdev->msi_x_entry = NULL;
2901 QPRINTK(qdev, IFUP, WARNING, 3235 netif_warn(qdev, ifup, qdev->ndev,
2902 "MSI-X Enable failed, trying MSI.\n"); 3236 "MSI-X Enable failed, trying MSI.\n");
2903 qdev->intr_count = 1; 3237 qdev->intr_count = 1;
2904 qlge_irq_type = MSI_IRQ; 3238 qlge_irq_type = MSI_IRQ;
2905 } else if (err == 0) { 3239 } else if (err == 0) {
2906 set_bit(QL_MSIX_ENABLED, &qdev->flags); 3240 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2907 QPRINTK(qdev, IFUP, INFO, 3241 netif_info(qdev, ifup, qdev->ndev,
2908 "MSI-X Enabled, got %d vectors.\n", 3242 "MSI-X Enabled, got %d vectors.\n",
2909 qdev->intr_count); 3243 qdev->intr_count);
2910 return; 3244 return;
2911 } 3245 }
2912 } 3246 }
@@ -2915,13 +3249,14 @@ msi:
2915 if (qlge_irq_type == MSI_IRQ) { 3249 if (qlge_irq_type == MSI_IRQ) {
2916 if (!pci_enable_msi(qdev->pdev)) { 3250 if (!pci_enable_msi(qdev->pdev)) {
2917 set_bit(QL_MSI_ENABLED, &qdev->flags); 3251 set_bit(QL_MSI_ENABLED, &qdev->flags);
2918 QPRINTK(qdev, IFUP, INFO, 3252 netif_info(qdev, ifup, qdev->ndev,
2919 "Running with MSI interrupts.\n"); 3253 "Running with MSI interrupts.\n");
2920 return; 3254 return;
2921 } 3255 }
2922 } 3256 }
2923 qlge_irq_type = LEG_IRQ; 3257 qlge_irq_type = LEG_IRQ;
2924 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n"); 3258 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3259 "Running with legacy interrupts.\n");
2925} 3260}
2926 3261
2927/* Each vector services 1 RSS ring and and 1 or more 3262/* Each vector services 1 RSS ring and and 1 or more
@@ -3093,12 +3428,12 @@ static void ql_free_irq(struct ql_adapter *qdev)
3093 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { 3428 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3094 free_irq(qdev->msi_x_entry[i].vector, 3429 free_irq(qdev->msi_x_entry[i].vector,
3095 &qdev->rx_ring[i]); 3430 &qdev->rx_ring[i]);
3096 QPRINTK(qdev, IFDOWN, DEBUG, 3431 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3097 "freeing msix interrupt %d.\n", i); 3432 "freeing msix interrupt %d.\n", i);
3098 } else { 3433 } else {
3099 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]); 3434 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3100 QPRINTK(qdev, IFDOWN, DEBUG, 3435 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3101 "freeing msi interrupt %d.\n", i); 3436 "freeing msi interrupt %d.\n", i);
3102 } 3437 }
3103 } 3438 }
3104 } 3439 }
@@ -3123,32 +3458,33 @@ static int ql_request_irq(struct ql_adapter *qdev)
3123 intr_context->name, 3458 intr_context->name,
3124 &qdev->rx_ring[i]); 3459 &qdev->rx_ring[i]);
3125 if (status) { 3460 if (status) {
3126 QPRINTK(qdev, IFUP, ERR, 3461 netif_err(qdev, ifup, qdev->ndev,
3127 "Failed request for MSIX interrupt %d.\n", 3462 "Failed request for MSIX interrupt %d.\n",
3128 i); 3463 i);
3129 goto err_irq; 3464 goto err_irq;
3130 } else { 3465 } else {
3131 QPRINTK(qdev, IFUP, DEBUG, 3466 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3132 "Hooked intr %d, queue type %s%s%s, with name %s.\n", 3467 "Hooked intr %d, queue type %s, with name %s.\n",
3133 i, 3468 i,
3134 qdev->rx_ring[i].type == 3469 qdev->rx_ring[i].type == DEFAULT_Q ?
3135 DEFAULT_Q ? "DEFAULT_Q" : "", 3470 "DEFAULT_Q" :
3136 qdev->rx_ring[i].type == 3471 qdev->rx_ring[i].type == TX_Q ?
3137 TX_Q ? "TX_Q" : "", 3472 "TX_Q" :
3138 qdev->rx_ring[i].type == 3473 qdev->rx_ring[i].type == RX_Q ?
3139 RX_Q ? "RX_Q" : "", intr_context->name); 3474 "RX_Q" : "",
3475 intr_context->name);
3140 } 3476 }
3141 } else { 3477 } else {
3142 QPRINTK(qdev, IFUP, DEBUG, 3478 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3143 "trying msi or legacy interrupts.\n"); 3479 "trying msi or legacy interrupts.\n");
3144 QPRINTK(qdev, IFUP, DEBUG, 3480 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3145 "%s: irq = %d.\n", __func__, pdev->irq); 3481 "%s: irq = %d.\n", __func__, pdev->irq);
3146 QPRINTK(qdev, IFUP, DEBUG, 3482 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3147 "%s: context->name = %s.\n", __func__, 3483 "%s: context->name = %s.\n", __func__,
3148 intr_context->name); 3484 intr_context->name);
3149 QPRINTK(qdev, IFUP, DEBUG, 3485 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3150 "%s: dev_id = 0x%p.\n", __func__, 3486 "%s: dev_id = 0x%p.\n", __func__,
3151 &qdev->rx_ring[0]); 3487 &qdev->rx_ring[0]);
3152 status = 3488 status =
3153 request_irq(pdev->irq, qlge_isr, 3489 request_irq(pdev->irq, qlge_isr,
3154 test_bit(QL_MSI_ENABLED, 3490 test_bit(QL_MSI_ENABLED,
@@ -3158,20 +3494,20 @@ static int ql_request_irq(struct ql_adapter *qdev)
3158 if (status) 3494 if (status)
3159 goto err_irq; 3495 goto err_irq;
3160 3496
3161 QPRINTK(qdev, IFUP, ERR, 3497 netif_err(qdev, ifup, qdev->ndev,
3162 "Hooked intr %d, queue type %s%s%s, with name %s.\n", 3498 "Hooked intr %d, queue type %s, with name %s.\n",
3163 i, 3499 i,
3164 qdev->rx_ring[0].type == 3500 qdev->rx_ring[0].type == DEFAULT_Q ?
3165 DEFAULT_Q ? "DEFAULT_Q" : "", 3501 "DEFAULT_Q" :
3166 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "", 3502 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3167 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "", 3503 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3168 intr_context->name); 3504 intr_context->name);
3169 } 3505 }
3170 intr_context->hooked = 1; 3506 intr_context->hooked = 1;
3171 } 3507 }
3172 return status; 3508 return status;
3173err_irq: 3509err_irq:
3174 QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!/n"); 3510 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n");
3175 ql_free_irq(qdev); 3511 ql_free_irq(qdev);
3176 return status; 3512 return status;
3177} 3513}
@@ -3205,14 +3541,15 @@ static int ql_start_rss(struct ql_adapter *qdev)
3205 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40); 3541 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3206 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16); 3542 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3207 3543
3208 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n"); 3544 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
3209 3545
3210 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0); 3546 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3211 if (status) { 3547 if (status) {
3212 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n"); 3548 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3213 return status; 3549 return status;
3214 } 3550 }
3215 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n"); 3551 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3552 "Successfully loaded RICB.\n");
3216 return status; 3553 return status;
3217} 3554}
3218 3555
@@ -3227,9 +3564,8 @@ static int ql_clear_routing_entries(struct ql_adapter *qdev)
3227 for (i = 0; i < 16; i++) { 3564 for (i = 0; i < 16; i++) {
3228 status = ql_set_routing_reg(qdev, i, 0, 0); 3565 status = ql_set_routing_reg(qdev, i, 0, 0);
3229 if (status) { 3566 if (status) {
3230 QPRINTK(qdev, IFUP, ERR, 3567 netif_err(qdev, ifup, qdev->ndev,
3231 "Failed to init routing register for CAM " 3568 "Failed to init routing register for CAM packets.\n");
3232 "packets.\n");
3233 break; 3569 break;
3234 } 3570 }
3235 } 3571 }
@@ -3253,14 +3589,14 @@ static int ql_route_initialize(struct ql_adapter *qdev)
3253 3589
3254 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1); 3590 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3255 if (status) { 3591 if (status) {
3256 QPRINTK(qdev, IFUP, ERR, 3592 netif_err(qdev, ifup, qdev->ndev,
3257 "Failed to init routing register for error packets.\n"); 3593 "Failed to init routing register for error packets.\n");
3258 goto exit; 3594 goto exit;
3259 } 3595 }
3260 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1); 3596 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3261 if (status) { 3597 if (status) {
3262 QPRINTK(qdev, IFUP, ERR, 3598 netif_err(qdev, ifup, qdev->ndev,
3263 "Failed to init routing register for broadcast packets.\n"); 3599 "Failed to init routing register for broadcast packets.\n");
3264 goto exit; 3600 goto exit;
3265 } 3601 }
3266 /* If we have more than one inbound queue, then turn on RSS in the 3602 /* If we have more than one inbound queue, then turn on RSS in the
@@ -3270,8 +3606,8 @@ static int ql_route_initialize(struct ql_adapter *qdev)
3270 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT, 3606 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3271 RT_IDX_RSS_MATCH, 1); 3607 RT_IDX_RSS_MATCH, 1);
3272 if (status) { 3608 if (status) {
3273 QPRINTK(qdev, IFUP, ERR, 3609 netif_err(qdev, ifup, qdev->ndev,
3274 "Failed to init routing register for MATCH RSS packets.\n"); 3610 "Failed to init routing register for MATCH RSS packets.\n");
3275 goto exit; 3611 goto exit;
3276 } 3612 }
3277 } 3613 }
@@ -3279,8 +3615,8 @@ static int ql_route_initialize(struct ql_adapter *qdev)
3279 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, 3615 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3280 RT_IDX_CAM_HIT, 1); 3616 RT_IDX_CAM_HIT, 1);
3281 if (status) 3617 if (status)
3282 QPRINTK(qdev, IFUP, ERR, 3618 netif_err(qdev, ifup, qdev->ndev,
3283 "Failed to init routing register for CAM packets.\n"); 3619 "Failed to init routing register for CAM packets.\n");
3284exit: 3620exit:
3285 ql_sem_unlock(qdev, SEM_RT_IDX_MASK); 3621 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3286 return status; 3622 return status;
@@ -3298,13 +3634,13 @@ int ql_cam_route_initialize(struct ql_adapter *qdev)
3298 set &= qdev->port_link_up; 3634 set &= qdev->port_link_up;
3299 status = ql_set_mac_addr(qdev, set); 3635 status = ql_set_mac_addr(qdev, set);
3300 if (status) { 3636 if (status) {
3301 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n"); 3637 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3302 return status; 3638 return status;
3303 } 3639 }
3304 3640
3305 status = ql_route_initialize(qdev); 3641 status = ql_route_initialize(qdev);
3306 if (status) 3642 if (status)
3307 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n"); 3643 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3308 3644
3309 return status; 3645 return status;
3310} 3646}
@@ -3332,15 +3668,15 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
3332 3668
3333 /* Enable the function, set pagesize, enable error checking. */ 3669 /* Enable the function, set pagesize, enable error checking. */
3334 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND | 3670 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3335 FSC_EC | FSC_VM_PAGE_4K | FSC_SH; 3671 FSC_EC | FSC_VM_PAGE_4K;
3672 value |= SPLT_SETTING;
3336 3673
3337 /* Set/clear header splitting. */ 3674 /* Set/clear header splitting. */
3338 mask = FSC_VM_PAGESIZE_MASK | 3675 mask = FSC_VM_PAGESIZE_MASK |
3339 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16); 3676 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3340 ql_write32(qdev, FSC, mask | value); 3677 ql_write32(qdev, FSC, mask | value);
3341 3678
3342 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP | 3679 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3343 min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));
3344 3680
3345 /* Set RX packet routing to use port/pci function on which the 3681 /* Set RX packet routing to use port/pci function on which the
3346 * packet arrived on in addition to usual frame routing. 3682 * packet arrived on in addition to usual frame routing.
@@ -3369,8 +3705,8 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
3369 for (i = 0; i < qdev->rx_ring_count; i++) { 3705 for (i = 0; i < qdev->rx_ring_count; i++) {
3370 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); 3706 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3371 if (status) { 3707 if (status) {
3372 QPRINTK(qdev, IFUP, ERR, 3708 netif_err(qdev, ifup, qdev->ndev,
3373 "Failed to start rx ring[%d].\n", i); 3709 "Failed to start rx ring[%d].\n", i);
3374 return status; 3710 return status;
3375 } 3711 }
3376 } 3712 }
@@ -3381,7 +3717,7 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
3381 if (qdev->rss_ring_count > 1) { 3717 if (qdev->rss_ring_count > 1) {
3382 status = ql_start_rss(qdev); 3718 status = ql_start_rss(qdev);
3383 if (status) { 3719 if (status) {
3384 QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n"); 3720 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3385 return status; 3721 return status;
3386 } 3722 }
3387 } 3723 }
@@ -3390,8 +3726,8 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
3390 for (i = 0; i < qdev->tx_ring_count; i++) { 3726 for (i = 0; i < qdev->tx_ring_count; i++) {
3391 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]); 3727 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3392 if (status) { 3728 if (status) {
3393 QPRINTK(qdev, IFUP, ERR, 3729 netif_err(qdev, ifup, qdev->ndev,
3394 "Failed to start tx ring[%d].\n", i); 3730 "Failed to start tx ring[%d].\n", i);
3395 return status; 3731 return status;
3396 } 3732 }
3397 } 3733 }
@@ -3399,20 +3735,20 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
3399 /* Initialize the port and set the max framesize. */ 3735 /* Initialize the port and set the max framesize. */
3400 status = qdev->nic_ops->port_initialize(qdev); 3736 status = qdev->nic_ops->port_initialize(qdev);
3401 if (status) 3737 if (status)
3402 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n"); 3738 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3403 3739
3404 /* Set up the MAC address and frame routing filter. */ 3740 /* Set up the MAC address and frame routing filter. */
3405 status = ql_cam_route_initialize(qdev); 3741 status = ql_cam_route_initialize(qdev);
3406 if (status) { 3742 if (status) {
3407 QPRINTK(qdev, IFUP, ERR, 3743 netif_err(qdev, ifup, qdev->ndev,
3408 "Failed to init CAM/Routing tables.\n"); 3744 "Failed to init CAM/Routing tables.\n");
3409 return status; 3745 return status;
3410 } 3746 }
3411 3747
3412 /* Start NAPI for the RSS queues. */ 3748 /* Start NAPI for the RSS queues. */
3413 for (i = 0; i < qdev->rss_ring_count; i++) { 3749 for (i = 0; i < qdev->rss_ring_count; i++) {
3414 QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n", 3750 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3415 i); 3751 "Enabling NAPI for rx_ring[%d].\n", i);
3416 napi_enable(&qdev->rx_ring[i].napi); 3752 napi_enable(&qdev->rx_ring[i].napi);
3417 } 3753 }
3418 3754
@@ -3429,7 +3765,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
3429 /* Clear all the entries in the routing table. */ 3765 /* Clear all the entries in the routing table. */
3430 status = ql_clear_routing_entries(qdev); 3766 status = ql_clear_routing_entries(qdev);
3431 if (status) { 3767 if (status) {
3432 QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n"); 3768 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3433 return status; 3769 return status;
3434 } 3770 }
3435 3771
@@ -3452,8 +3788,8 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
3452 } while (time_before(jiffies, end_jiffies)); 3788 } while (time_before(jiffies, end_jiffies));
3453 3789
3454 if (value & RST_FO_FR) { 3790 if (value & RST_FO_FR) {
3455 QPRINTK(qdev, IFDOWN, ERR, 3791 netif_err(qdev, ifdown, qdev->ndev,
3456 "ETIMEDOUT!!! errored out of resetting the chip!\n"); 3792 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3457 status = -ETIMEDOUT; 3793 status = -ETIMEDOUT;
3458 } 3794 }
3459 3795
@@ -3466,16 +3802,17 @@ static void ql_display_dev_info(struct net_device *ndev)
3466{ 3802{
3467 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 3803 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3468 3804
3469 QPRINTK(qdev, PROBE, INFO, 3805 netif_info(qdev, probe, qdev->ndev,
3470 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, " 3806 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3471 "XG Roll = %d, XG Rev = %d.\n", 3807 "XG Roll = %d, XG Rev = %d.\n",
3472 qdev->func, 3808 qdev->func,
3473 qdev->port, 3809 qdev->port,
3474 qdev->chip_rev_id & 0x0000000f, 3810 qdev->chip_rev_id & 0x0000000f,
3475 qdev->chip_rev_id >> 4 & 0x0000000f, 3811 qdev->chip_rev_id >> 4 & 0x0000000f,
3476 qdev->chip_rev_id >> 8 & 0x0000000f, 3812 qdev->chip_rev_id >> 8 & 0x0000000f,
3477 qdev->chip_rev_id >> 12 & 0x0000000f); 3813 qdev->chip_rev_id >> 12 & 0x0000000f);
3478 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr); 3814 netif_info(qdev, probe, qdev->ndev,
3815 "MAC address %pM\n", ndev->dev_addr);
3479} 3816}
3480 3817
3481int ql_wol(struct ql_adapter *qdev) 3818int ql_wol(struct ql_adapter *qdev)
@@ -3492,23 +3829,23 @@ int ql_wol(struct ql_adapter *qdev)
3492 3829
3493 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST | 3830 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3494 WAKE_MCAST | WAKE_BCAST)) { 3831 WAKE_MCAST | WAKE_BCAST)) {
3495 QPRINTK(qdev, IFDOWN, ERR, 3832 netif_err(qdev, ifdown, qdev->ndev,
3496 "Unsupported WOL paramter. qdev->wol = 0x%x.\n", 3833 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3497 qdev->wol); 3834 qdev->wol);
3498 return -EINVAL; 3835 return -EINVAL;
3499 } 3836 }
3500 3837
3501 if (qdev->wol & WAKE_MAGIC) { 3838 if (qdev->wol & WAKE_MAGIC) {
3502 status = ql_mb_wol_set_magic(qdev, 1); 3839 status = ql_mb_wol_set_magic(qdev, 1);
3503 if (status) { 3840 if (status) {
3504 QPRINTK(qdev, IFDOWN, ERR, 3841 netif_err(qdev, ifdown, qdev->ndev,
3505 "Failed to set magic packet on %s.\n", 3842 "Failed to set magic packet on %s.\n",
3506 qdev->ndev->name); 3843 qdev->ndev->name);
3507 return status; 3844 return status;
3508 } else 3845 } else
3509 QPRINTK(qdev, DRV, INFO, 3846 netif_info(qdev, drv, qdev->ndev,
3510 "Enabled magic packet successfully on %s.\n", 3847 "Enabled magic packet successfully on %s.\n",
3511 qdev->ndev->name); 3848 qdev->ndev->name);
3512 3849
3513 wol |= MB_WOL_MAGIC_PKT; 3850 wol |= MB_WOL_MAGIC_PKT;
3514 } 3851 }
@@ -3516,9 +3853,10 @@ int ql_wol(struct ql_adapter *qdev)
3516 if (qdev->wol) { 3853 if (qdev->wol) {
3517 wol |= MB_WOL_MODE_ON; 3854 wol |= MB_WOL_MODE_ON;
3518 status = ql_mb_wol_mode(qdev, wol); 3855 status = ql_mb_wol_mode(qdev, wol);
3519 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n", 3856 netif_err(qdev, drv, qdev->ndev,
3520 (status == 0) ? "Sucessfully set" : "Failed", wol, 3857 "WOL %s (wol code 0x%x) on %s\n",
3521 qdev->ndev->name); 3858 (status == 0) ? "Sucessfully set" : "Failed",
3859 wol, qdev->ndev->name);
3522 } 3860 }
3523 3861
3524 return status; 3862 return status;
@@ -3538,6 +3876,7 @@ static int ql_adapter_down(struct ql_adapter *qdev)
3538 cancel_delayed_work_sync(&qdev->mpi_reset_work); 3876 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3539 cancel_delayed_work_sync(&qdev->mpi_work); 3877 cancel_delayed_work_sync(&qdev->mpi_work);
3540 cancel_delayed_work_sync(&qdev->mpi_idc_work); 3878 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3879 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3541 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); 3880 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3542 3881
3543 for (i = 0; i < qdev->rss_ring_count; i++) 3882 for (i = 0; i < qdev->rss_ring_count; i++)
@@ -3558,8 +3897,8 @@ static int ql_adapter_down(struct ql_adapter *qdev)
3558 3897
3559 status = ql_adapter_reset(qdev); 3898 status = ql_adapter_reset(qdev);
3560 if (status) 3899 if (status)
3561 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n", 3900 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3562 qdev->func); 3901 qdev->func);
3563 return status; 3902 return status;
3564} 3903}
3565 3904
@@ -3569,7 +3908,7 @@ static int ql_adapter_up(struct ql_adapter *qdev)
3569 3908
3570 err = ql_adapter_initialize(qdev); 3909 err = ql_adapter_initialize(qdev);
3571 if (err) { 3910 if (err) {
3572 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n"); 3911 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3573 goto err_init; 3912 goto err_init;
3574 } 3913 }
3575 set_bit(QL_ADAPTER_UP, &qdev->flags); 3914 set_bit(QL_ADAPTER_UP, &qdev->flags);
@@ -3601,7 +3940,7 @@ static int ql_get_adapter_resources(struct ql_adapter *qdev)
3601 int status = 0; 3940 int status = 0;
3602 3941
3603 if (ql_alloc_mem_resources(qdev)) { 3942 if (ql_alloc_mem_resources(qdev)) {
3604 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n"); 3943 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3605 return -ENOMEM; 3944 return -ENOMEM;
3606 } 3945 }
3607 status = ql_request_irq(qdev); 3946 status = ql_request_irq(qdev);
@@ -3612,6 +3951,16 @@ static int qlge_close(struct net_device *ndev)
3612{ 3951{
3613 struct ql_adapter *qdev = netdev_priv(ndev); 3952 struct ql_adapter *qdev = netdev_priv(ndev);
3614 3953
3954 /* If we hit pci_channel_io_perm_failure
3955 * failure condition, then we already
3956 * brought the adapter down.
3957 */
3958 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3959 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
3960 clear_bit(QL_EEH_FATAL, &qdev->flags);
3961 return 0;
3962 }
3963
3615 /* 3964 /*
3616 * Wait for device to recover from a reset. 3965 * Wait for device to recover from a reset.
3617 * (Rarely happens, but possible.) 3966 * (Rarely happens, but possible.)
@@ -3681,9 +4030,10 @@ static int ql_configure_rings(struct ql_adapter *qdev)
3681 rx_ring->lbq_size = 4030 rx_ring->lbq_size =
3682 rx_ring->lbq_len * sizeof(__le64); 4031 rx_ring->lbq_len * sizeof(__le64);
3683 rx_ring->lbq_buf_size = (u16)lbq_buf_len; 4032 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
3684 QPRINTK(qdev, IFUP, DEBUG, 4033 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3685 "lbq_buf_size %d, order = %d\n", 4034 "lbq_buf_size %d, order = %d\n",
3686 rx_ring->lbq_buf_size, qdev->lbq_buf_order); 4035 rx_ring->lbq_buf_size,
4036 qdev->lbq_buf_order);
3687 rx_ring->sbq_len = NUM_SMALL_BUFFERS; 4037 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3688 rx_ring->sbq_size = 4038 rx_ring->sbq_size =
3689 rx_ring->sbq_len * sizeof(__le64); 4039 rx_ring->sbq_len * sizeof(__le64);
@@ -3747,14 +4097,14 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
3747 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { 4097 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
3748 int i = 3; 4098 int i = 3;
3749 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { 4099 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
3750 QPRINTK(qdev, IFUP, ERR, 4100 netif_err(qdev, ifup, qdev->ndev,
3751 "Waiting for adapter UP...\n"); 4101 "Waiting for adapter UP...\n");
3752 ssleep(1); 4102 ssleep(1);
3753 } 4103 }
3754 4104
3755 if (!i) { 4105 if (!i) {
3756 QPRINTK(qdev, IFUP, ERR, 4106 netif_err(qdev, ifup, qdev->ndev,
3757 "Timed out waiting for adapter UP\n"); 4107 "Timed out waiting for adapter UP\n");
3758 return -ETIMEDOUT; 4108 return -ETIMEDOUT;
3759 } 4109 }
3760 } 4110 }
@@ -3780,8 +4130,8 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
3780 4130
3781 return status; 4131 return status;
3782error: 4132error:
3783 QPRINTK(qdev, IFUP, ALERT, 4133 netif_alert(qdev, ifup, qdev->ndev,
3784 "Driver up/down cycle failed, closing device.\n"); 4134 "Driver up/down cycle failed, closing device.\n");
3785 set_bit(QL_ADAPTER_UP, &qdev->flags); 4135 set_bit(QL_ADAPTER_UP, &qdev->flags);
3786 dev_close(qdev->ndev); 4136 dev_close(qdev->ndev);
3787 return status; 4137 return status;
@@ -3793,28 +4143,25 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3793 int status; 4143 int status;
3794 4144
3795 if (ndev->mtu == 1500 && new_mtu == 9000) { 4145 if (ndev->mtu == 1500 && new_mtu == 9000) {
3796 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n"); 4146 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
3797 } else if (ndev->mtu == 9000 && new_mtu == 1500) { 4147 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3798 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n"); 4148 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
3799 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3800 (ndev->mtu == 9000 && new_mtu == 9000)) {
3801 return 0;
3802 } else 4149 } else
3803 return -EINVAL; 4150 return -EINVAL;
3804 4151
3805 queue_delayed_work(qdev->workqueue, 4152 queue_delayed_work(qdev->workqueue,
3806 &qdev->mpi_port_cfg_work, 3*HZ); 4153 &qdev->mpi_port_cfg_work, 3*HZ);
3807 4154
4155 ndev->mtu = new_mtu;
4156
3808 if (!netif_running(qdev->ndev)) { 4157 if (!netif_running(qdev->ndev)) {
3809 ndev->mtu = new_mtu;
3810 return 0; 4158 return 0;
3811 } 4159 }
3812 4160
3813 ndev->mtu = new_mtu;
3814 status = ql_change_rx_buffers(qdev); 4161 status = ql_change_rx_buffers(qdev);
3815 if (status) { 4162 if (status) {
3816 QPRINTK(qdev, IFUP, ERR, 4163 netif_err(qdev, ifup, qdev->ndev,
3817 "Changing MTU failed.\n"); 4164 "Changing MTU failed.\n");
3818 } 4165 }
3819 4166
3820 return status; 4167 return status;
@@ -3874,8 +4221,8 @@ static void qlge_set_multicast_list(struct net_device *ndev)
3874 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) { 4221 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3875 if (ql_set_routing_reg 4222 if (ql_set_routing_reg
3876 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) { 4223 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3877 QPRINTK(qdev, HW, ERR, 4224 netif_err(qdev, hw, qdev->ndev,
3878 "Failed to set promiscous mode.\n"); 4225 "Failed to set promiscous mode.\n");
3879 } else { 4226 } else {
3880 set_bit(QL_PROMISCUOUS, &qdev->flags); 4227 set_bit(QL_PROMISCUOUS, &qdev->flags);
3881 } 4228 }
@@ -3884,8 +4231,8 @@ static void qlge_set_multicast_list(struct net_device *ndev)
3884 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) { 4231 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3885 if (ql_set_routing_reg 4232 if (ql_set_routing_reg
3886 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) { 4233 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3887 QPRINTK(qdev, HW, ERR, 4234 netif_err(qdev, hw, qdev->ndev,
3888 "Failed to clear promiscous mode.\n"); 4235 "Failed to clear promiscous mode.\n");
3889 } else { 4236 } else {
3890 clear_bit(QL_PROMISCUOUS, &qdev->flags); 4237 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3891 } 4238 }
@@ -3897,12 +4244,12 @@ static void qlge_set_multicast_list(struct net_device *ndev)
3897 * transition is taking place. 4244 * transition is taking place.
3898 */ 4245 */
3899 if ((ndev->flags & IFF_ALLMULTI) || 4246 if ((ndev->flags & IFF_ALLMULTI) ||
3900 (ndev->mc_count > MAX_MULTICAST_ENTRIES)) { 4247 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
3901 if (!test_bit(QL_ALLMULTI, &qdev->flags)) { 4248 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3902 if (ql_set_routing_reg 4249 if (ql_set_routing_reg
3903 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) { 4250 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3904 QPRINTK(qdev, HW, ERR, 4251 netif_err(qdev, hw, qdev->ndev,
3905 "Failed to set all-multi mode.\n"); 4252 "Failed to set all-multi mode.\n");
3906 } else { 4253 } else {
3907 set_bit(QL_ALLMULTI, &qdev->flags); 4254 set_bit(QL_ALLMULTI, &qdev->flags);
3908 } 4255 }
@@ -3911,32 +4258,34 @@ static void qlge_set_multicast_list(struct net_device *ndev)
3911 if (test_bit(QL_ALLMULTI, &qdev->flags)) { 4258 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3912 if (ql_set_routing_reg 4259 if (ql_set_routing_reg
3913 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) { 4260 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3914 QPRINTK(qdev, HW, ERR, 4261 netif_err(qdev, hw, qdev->ndev,
3915 "Failed to clear all-multi mode.\n"); 4262 "Failed to clear all-multi mode.\n");
3916 } else { 4263 } else {
3917 clear_bit(QL_ALLMULTI, &qdev->flags); 4264 clear_bit(QL_ALLMULTI, &qdev->flags);
3918 } 4265 }
3919 } 4266 }
3920 } 4267 }
3921 4268
3922 if (ndev->mc_count) { 4269 if (!netdev_mc_empty(ndev)) {
3923 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 4270 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3924 if (status) 4271 if (status)
3925 goto exit; 4272 goto exit;
3926 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr; 4273 i = 0;
3927 i++, mc_ptr = mc_ptr->next) 4274 netdev_for_each_mc_addr(mc_ptr, ndev) {
3928 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr, 4275 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3929 MAC_ADDR_TYPE_MULTI_MAC, i)) { 4276 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3930 QPRINTK(qdev, HW, ERR, 4277 netif_err(qdev, hw, qdev->ndev,
3931 "Failed to loadmulticast address.\n"); 4278 "Failed to loadmulticast address.\n");
3932 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 4279 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3933 goto exit; 4280 goto exit;
3934 } 4281 }
4282 i++;
4283 }
3935 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 4284 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3936 if (ql_set_routing_reg 4285 if (ql_set_routing_reg
3937 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) { 4286 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3938 QPRINTK(qdev, HW, ERR, 4287 netif_err(qdev, hw, qdev->ndev,
3939 "Failed to set multicast match mode.\n"); 4288 "Failed to set multicast match mode.\n");
3940 } else { 4289 } else {
3941 set_bit(QL_ALLMULTI, &qdev->flags); 4290 set_bit(QL_ALLMULTI, &qdev->flags);
3942 } 4291 }
@@ -3954,6 +4303,8 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
3954 if (!is_valid_ether_addr(addr->sa_data)) 4303 if (!is_valid_ether_addr(addr->sa_data))
3955 return -EADDRNOTAVAIL; 4304 return -EADDRNOTAVAIL;
3956 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 4305 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4306 /* Update local copy of current mac address. */
4307 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
3957 4308
3958 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 4309 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3959 if (status) 4310 if (status)
@@ -3961,7 +4312,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
3961 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, 4312 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3962 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); 4313 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
3963 if (status) 4314 if (status)
3964 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n"); 4315 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
3965 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 4316 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3966 return status; 4317 return status;
3967} 4318}
@@ -3994,8 +4345,8 @@ static void ql_asic_reset_work(struct work_struct *work)
3994 rtnl_unlock(); 4345 rtnl_unlock();
3995 return; 4346 return;
3996error: 4347error:
3997 QPRINTK(qdev, IFUP, ALERT, 4348 netif_alert(qdev, ifup, qdev->ndev,
3998 "Driver up/down cycle failed, closing device\n"); 4349 "Driver up/down cycle failed, closing device\n");
3999 4350
4000 set_bit(QL_ADAPTER_UP, &qdev->flags); 4351 set_bit(QL_ADAPTER_UP, &qdev->flags);
4001 dev_close(qdev->ndev); 4352 dev_close(qdev->ndev);
@@ -4094,6 +4445,7 @@ static void ql_release_all(struct pci_dev *pdev)
4094 iounmap(qdev->reg_base); 4445 iounmap(qdev->reg_base);
4095 if (qdev->doorbell_area) 4446 if (qdev->doorbell_area)
4096 iounmap(qdev->doorbell_area); 4447 iounmap(qdev->doorbell_area);
4448 vfree(qdev->mpi_coredump);
4097 pci_release_regions(pdev); 4449 pci_release_regions(pdev);
4098 pci_set_drvdata(pdev, NULL); 4450 pci_set_drvdata(pdev, NULL);
4099} 4451}
@@ -4175,6 +4527,17 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4175 spin_lock_init(&qdev->hw_lock); 4527 spin_lock_init(&qdev->hw_lock);
4176 spin_lock_init(&qdev->stats_lock); 4528 spin_lock_init(&qdev->stats_lock);
4177 4529
4530 if (qlge_mpi_coredump) {
4531 qdev->mpi_coredump =
4532 vmalloc(sizeof(struct ql_mpi_coredump));
4533 if (qdev->mpi_coredump == NULL) {
4534 dev_err(&pdev->dev, "Coredump alloc failed.\n");
4535 err = -ENOMEM;
4536 goto err_out2;
4537 }
4538 if (qlge_force_coredump)
4539 set_bit(QL_FRC_COREDUMP, &qdev->flags);
4540 }
4178 /* make sure the EEPROM is good */ 4541 /* make sure the EEPROM is good */
4179 err = qdev->nic_ops->get_flash(qdev); 4542 err = qdev->nic_ops->get_flash(qdev);
4180 if (err) { 4543 if (err) {
@@ -4183,6 +4546,8 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4183 } 4546 }
4184 4547
4185 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); 4548 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4549 /* Keep local copy of current mac address. */
4550 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4186 4551
4187 /* Set up the default ring sizes. */ 4552 /* Set up the default ring sizes. */
4188 qdev->tx_ring_size = NUM_TX_RING_ENTRIES; 4553 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
@@ -4204,6 +4569,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4204 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); 4569 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4205 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); 4570 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4206 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); 4571 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4572 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4207 init_completion(&qdev->ide_completion); 4573 init_completion(&qdev->ide_completion);
4208 4574
4209 if (!cards_found) { 4575 if (!cards_found) {
@@ -4234,6 +4600,21 @@ static const struct net_device_ops qlge_netdev_ops = {
4234 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, 4600 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
4235}; 4601};
4236 4602
4603static void ql_timer(unsigned long data)
4604{
4605 struct ql_adapter *qdev = (struct ql_adapter *)data;
4606 u32 var = 0;
4607
4608 var = ql_read32(qdev, STS);
4609 if (pci_channel_offline(qdev->pdev)) {
4610 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4611 return;
4612 }
4613
4614 qdev->timer.expires = jiffies + (5*HZ);
4615 add_timer(&qdev->timer);
4616}
4617
4237static int __devinit qlge_probe(struct pci_dev *pdev, 4618static int __devinit qlge_probe(struct pci_dev *pdev,
4238 const struct pci_device_id *pci_entry) 4619 const struct pci_device_id *pci_entry)
4239{ 4620{
@@ -4285,6 +4666,14 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
4285 pci_disable_device(pdev); 4666 pci_disable_device(pdev);
4286 return err; 4667 return err;
4287 } 4668 }
4669 /* Start up the timer to trigger EEH if
4670 * the bus goes dead
4671 */
4672 init_timer_deferrable(&qdev->timer);
4673 qdev->timer.data = (unsigned long)qdev;
4674 qdev->timer.function = ql_timer;
4675 qdev->timer.expires = jiffies + (5*HZ);
4676 add_timer(&qdev->timer);
4288 ql_link_off(qdev); 4677 ql_link_off(qdev);
4289 ql_display_dev_info(ndev); 4678 ql_display_dev_info(ndev);
4290 atomic_set(&qdev->lb_count, 0); 4679 atomic_set(&qdev->lb_count, 0);
@@ -4305,6 +4694,8 @@ int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4305static void __devexit qlge_remove(struct pci_dev *pdev) 4694static void __devexit qlge_remove(struct pci_dev *pdev)
4306{ 4695{
4307 struct net_device *ndev = pci_get_drvdata(pdev); 4696 struct net_device *ndev = pci_get_drvdata(pdev);
4697 struct ql_adapter *qdev = netdev_priv(ndev);
4698 del_timer_sync(&qdev->timer);
4308 unregister_netdev(ndev); 4699 unregister_netdev(ndev);
4309 ql_release_all(pdev); 4700 ql_release_all(pdev);
4310 pci_disable_device(pdev); 4701 pci_disable_device(pdev);
@@ -4327,6 +4718,7 @@ static void ql_eeh_close(struct net_device *ndev)
4327 cancel_delayed_work_sync(&qdev->mpi_reset_work); 4718 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4328 cancel_delayed_work_sync(&qdev->mpi_work); 4719 cancel_delayed_work_sync(&qdev->mpi_work);
4329 cancel_delayed_work_sync(&qdev->mpi_idc_work); 4720 cancel_delayed_work_sync(&qdev->mpi_idc_work);
4721 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
4330 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); 4722 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4331 4723
4332 for (i = 0; i < qdev->rss_ring_count; i++) 4724 for (i = 0; i < qdev->rss_ring_count; i++)
@@ -4346,6 +4738,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4346 enum pci_channel_state state) 4738 enum pci_channel_state state)
4347{ 4739{
4348 struct net_device *ndev = pci_get_drvdata(pdev); 4740 struct net_device *ndev = pci_get_drvdata(pdev);
4741 struct ql_adapter *qdev = netdev_priv(ndev);
4349 4742
4350 switch (state) { 4743 switch (state) {
4351 case pci_channel_io_normal: 4744 case pci_channel_io_normal:
@@ -4359,6 +4752,8 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4359 case pci_channel_io_perm_failure: 4752 case pci_channel_io_perm_failure:
4360 dev_err(&pdev->dev, 4753 dev_err(&pdev->dev,
4361 "%s: pci_channel_io_perm_failure.\n", __func__); 4754 "%s: pci_channel_io_perm_failure.\n", __func__);
4755 ql_eeh_close(ndev);
4756 set_bit(QL_EEH_FATAL, &qdev->flags);
4362 return PCI_ERS_RESULT_DISCONNECT; 4757 return PCI_ERS_RESULT_DISCONNECT;
4363 } 4758 }
4364 4759
@@ -4381,11 +4776,18 @@ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4381 4776
4382 pci_restore_state(pdev); 4777 pci_restore_state(pdev);
4383 if (pci_enable_device(pdev)) { 4778 if (pci_enable_device(pdev)) {
4384 QPRINTK(qdev, IFUP, ERR, 4779 netif_err(qdev, ifup, qdev->ndev,
4385 "Cannot re-enable PCI device after reset.\n"); 4780 "Cannot re-enable PCI device after reset.\n");
4386 return PCI_ERS_RESULT_DISCONNECT; 4781 return PCI_ERS_RESULT_DISCONNECT;
4387 } 4782 }
4388 pci_set_master(pdev); 4783 pci_set_master(pdev);
4784
4785 if (ql_adapter_reset(qdev)) {
4786 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4787 set_bit(QL_EEH_FATAL, &qdev->flags);
4788 return PCI_ERS_RESULT_DISCONNECT;
4789 }
4790
4389 return PCI_ERS_RESULT_RECOVERED; 4791 return PCI_ERS_RESULT_RECOVERED;
4390} 4792}
4391 4793
@@ -4395,19 +4797,19 @@ static void qlge_io_resume(struct pci_dev *pdev)
4395 struct ql_adapter *qdev = netdev_priv(ndev); 4797 struct ql_adapter *qdev = netdev_priv(ndev);
4396 int err = 0; 4798 int err = 0;
4397 4799
4398 if (ql_adapter_reset(qdev))
4399 QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
4400 if (netif_running(ndev)) { 4800 if (netif_running(ndev)) {
4401 err = qlge_open(ndev); 4801 err = qlge_open(ndev);
4402 if (err) { 4802 if (err) {
4403 QPRINTK(qdev, IFUP, ERR, 4803 netif_err(qdev, ifup, qdev->ndev,
4404 "Device initialization failed after reset.\n"); 4804 "Device initialization failed after reset.\n");
4405 return; 4805 return;
4406 } 4806 }
4407 } else { 4807 } else {
4408 QPRINTK(qdev, IFUP, ERR, 4808 netif_err(qdev, ifup, qdev->ndev,
4409 "Device was not running prior to EEH.\n"); 4809 "Device was not running prior to EEH.\n");
4410 } 4810 }
4811 qdev->timer.expires = jiffies + (5*HZ);
4812 add_timer(&qdev->timer);
4411 netif_device_attach(ndev); 4813 netif_device_attach(ndev);
4412} 4814}
4413 4815
@@ -4424,6 +4826,7 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4424 int err; 4826 int err;
4425 4827
4426 netif_device_detach(ndev); 4828 netif_device_detach(ndev);
4829 del_timer_sync(&qdev->timer);
4427 4830
4428 if (netif_running(ndev)) { 4831 if (netif_running(ndev)) {
4429 err = ql_adapter_down(qdev); 4832 err = ql_adapter_down(qdev);
@@ -4454,7 +4857,7 @@ static int qlge_resume(struct pci_dev *pdev)
4454 pci_restore_state(pdev); 4857 pci_restore_state(pdev);
4455 err = pci_enable_device(pdev); 4858 err = pci_enable_device(pdev);
4456 if (err) { 4859 if (err) {
4457 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n"); 4860 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4458 return err; 4861 return err;
4459 } 4862 }
4460 pci_set_master(pdev); 4863 pci_set_master(pdev);
@@ -4468,6 +4871,8 @@ static int qlge_resume(struct pci_dev *pdev)
4468 return err; 4871 return err;
4469 } 4872 }
4470 4873
4874 qdev->timer.expires = jiffies + (5*HZ);
4875 add_timer(&qdev->timer);
4471 netif_device_attach(ndev); 4876 netif_device_attach(ndev);
4472 4877
4473 return 0; 4878 return 0;
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index e2b2286102d4..3c00462a5d22 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -1,5 +1,54 @@
1#include "qlge.h" 1#include "qlge.h"
2 2
3int ql_unpause_mpi_risc(struct ql_adapter *qdev)
4{
5 u32 tmp;
6
7 /* Un-pause the RISC */
8 tmp = ql_read32(qdev, CSR);
9 if (!(tmp & CSR_RP))
10 return -EIO;
11
12 ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
13 return 0;
14}
15
16int ql_pause_mpi_risc(struct ql_adapter *qdev)
17{
18 u32 tmp;
19 int count = UDELAY_COUNT;
20
21 /* Pause the RISC */
22 ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
23 do {
24 tmp = ql_read32(qdev, CSR);
25 if (tmp & CSR_RP)
26 break;
27 mdelay(UDELAY_DELAY);
28 count--;
29 } while (count);
30 return (count == 0) ? -ETIMEDOUT : 0;
31}
32
33int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
34{
35 u32 tmp;
36 int count = UDELAY_COUNT;
37
38 /* Reset the RISC */
39 ql_write32(qdev, CSR, CSR_CMD_SET_RST);
40 do {
41 tmp = ql_read32(qdev, CSR);
42 if (tmp & CSR_RR) {
43 ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
44 break;
45 }
46 mdelay(UDELAY_DELAY);
47 count--;
48 } while (count);
49 return (count == 0) ? -ETIMEDOUT : 0;
50}
51
3int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data) 52int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
4{ 53{
5 int status; 54 int status;
@@ -45,6 +94,35 @@ int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
45 return status; 94 return status;
46} 95}
47 96
97/* Determine if we are in charge of the firwmare. If
98 * we are the lower of the 2 NIC pcie functions, or if
99 * we are the higher function and the lower function
100 * is not enabled.
101 */
102int ql_own_firmware(struct ql_adapter *qdev)
103{
104 u32 temp;
105
106 /* If we are the lower of the 2 NIC functions
107 * on the chip the we are responsible for
108 * core dump and firmware reset after an error.
109 */
110 if (qdev->func < qdev->alt_func)
111 return 1;
112
113 /* If we are the higher of the 2 NIC functions
114 * on the chip and the lower function is not
115 * enabled, then we are responsible for
116 * core dump and firmware reset after an error.
117 */
118 temp = ql_read32(qdev, STS);
119 if (!(temp & (1 << (8 + qdev->alt_func))))
120 return 1;
121
122 return 0;
123
124}
125
48static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp) 126static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
49{ 127{
50 int i, status; 128 int i, status;
@@ -57,7 +135,7 @@ static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
57 ql_read_mpi_reg(qdev, qdev->mailbox_out + i, 135 ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
58 &mbcp->mbox_out[i]); 136 &mbcp->mbox_out[i]);
59 if (status) { 137 if (status) {
60 QPRINTK(qdev, DRV, ERR, "Failed mailbox read.\n"); 138 netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
61 break; 139 break;
62 } 140 }
63 } 141 }
@@ -130,7 +208,7 @@ static int ql_idc_req_aen(struct ql_adapter *qdev)
130 int status; 208 int status;
131 struct mbox_params *mbcp = &qdev->idc_mbc; 209 struct mbox_params *mbcp = &qdev->idc_mbc;
132 210
133 QPRINTK(qdev, DRV, ERR, "Enter!\n"); 211 netif_err(qdev, drv, qdev->ndev, "Enter!\n");
134 /* Get the status data and start up a thread to 212 /* Get the status data and start up a thread to
135 * handle the request. 213 * handle the request.
136 */ 214 */
@@ -138,8 +216,8 @@ static int ql_idc_req_aen(struct ql_adapter *qdev)
138 mbcp->out_count = 4; 216 mbcp->out_count = 4;
139 status = ql_get_mb_sts(qdev, mbcp); 217 status = ql_get_mb_sts(qdev, mbcp);
140 if (status) { 218 if (status) {
141 QPRINTK(qdev, DRV, ERR, 219 netif_err(qdev, drv, qdev->ndev,
142 "Could not read MPI, resetting ASIC!\n"); 220 "Could not read MPI, resetting ASIC!\n");
143 ql_queue_asic_error(qdev); 221 ql_queue_asic_error(qdev);
144 } else { 222 } else {
145 /* Begin polled mode early so 223 /* Begin polled mode early so
@@ -162,8 +240,8 @@ static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
162 mbcp->out_count = 4; 240 mbcp->out_count = 4;
163 status = ql_get_mb_sts(qdev, mbcp); 241 status = ql_get_mb_sts(qdev, mbcp);
164 if (status) { 242 if (status) {
165 QPRINTK(qdev, DRV, ERR, 243 netif_err(qdev, drv, qdev->ndev,
166 "Could not read MPI, resetting RISC!\n"); 244 "Could not read MPI, resetting RISC!\n");
167 ql_queue_fw_error(qdev); 245 ql_queue_fw_error(qdev);
168 } else 246 } else
169 /* Wake up the sleeping mpi_idc_work thread that is 247 /* Wake up the sleeping mpi_idc_work thread that is
@@ -181,13 +259,13 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
181 259
182 status = ql_get_mb_sts(qdev, mbcp); 260 status = ql_get_mb_sts(qdev, mbcp);
183 if (status) { 261 if (status) {
184 QPRINTK(qdev, DRV, ERR, 262 netif_err(qdev, drv, qdev->ndev,
185 "%s: Could not get mailbox status.\n", __func__); 263 "%s: Could not get mailbox status.\n", __func__);
186 return; 264 return;
187 } 265 }
188 266
189 qdev->link_status = mbcp->mbox_out[1]; 267 qdev->link_status = mbcp->mbox_out[1];
190 QPRINTK(qdev, DRV, ERR, "Link Up.\n"); 268 netif_err(qdev, drv, qdev->ndev, "Link Up.\n");
191 269
192 /* If we're coming back from an IDC event 270 /* If we're coming back from an IDC event
193 * then set up the CAM and frame routing. 271 * then set up the CAM and frame routing.
@@ -195,8 +273,8 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
195 if (test_bit(QL_CAM_RT_SET, &qdev->flags)) { 273 if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
196 status = ql_cam_route_initialize(qdev); 274 status = ql_cam_route_initialize(qdev);
197 if (status) { 275 if (status) {
198 QPRINTK(qdev, IFUP, ERR, 276 netif_err(qdev, ifup, qdev->ndev,
199 "Failed to init CAM/Routing tables.\n"); 277 "Failed to init CAM/Routing tables.\n");
200 return; 278 return;
201 } else 279 } else
202 clear_bit(QL_CAM_RT_SET, &qdev->flags); 280 clear_bit(QL_CAM_RT_SET, &qdev->flags);
@@ -207,7 +285,7 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
207 * to our liking. 285 * to our liking.
208 */ 286 */
209 if (!test_bit(QL_PORT_CFG, &qdev->flags)) { 287 if (!test_bit(QL_PORT_CFG, &qdev->flags)) {
210 QPRINTK(qdev, DRV, ERR, "Queue Port Config Worker!\n"); 288 netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n");
211 set_bit(QL_PORT_CFG, &qdev->flags); 289 set_bit(QL_PORT_CFG, &qdev->flags);
212 /* Begin polled mode early so 290 /* Begin polled mode early so
213 * we don't get another interrupt 291 * we don't get another interrupt
@@ -229,7 +307,7 @@ static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
229 307
230 status = ql_get_mb_sts(qdev, mbcp); 308 status = ql_get_mb_sts(qdev, mbcp);
231 if (status) 309 if (status)
232 QPRINTK(qdev, DRV, ERR, "Link down AEN broken!\n"); 310 netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n");
233 311
234 ql_link_off(qdev); 312 ql_link_off(qdev);
235} 313}
@@ -242,9 +320,9 @@ static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
242 320
243 status = ql_get_mb_sts(qdev, mbcp); 321 status = ql_get_mb_sts(qdev, mbcp);
244 if (status) 322 if (status)
245 QPRINTK(qdev, DRV, ERR, "SFP in AEN broken!\n"); 323 netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n");
246 else 324 else
247 QPRINTK(qdev, DRV, ERR, "SFP insertion detected.\n"); 325 netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n");
248 326
249 return status; 327 return status;
250} 328}
@@ -257,9 +335,9 @@ static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
257 335
258 status = ql_get_mb_sts(qdev, mbcp); 336 status = ql_get_mb_sts(qdev, mbcp);
259 if (status) 337 if (status)
260 QPRINTK(qdev, DRV, ERR, "SFP out AEN broken!\n"); 338 netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n");
261 else 339 else
262 QPRINTK(qdev, DRV, ERR, "SFP removal detected.\n"); 340 netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n");
263 341
264 return status; 342 return status;
265} 343}
@@ -272,13 +350,13 @@ static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
272 350
273 status = ql_get_mb_sts(qdev, mbcp); 351 status = ql_get_mb_sts(qdev, mbcp);
274 if (status) 352 if (status)
275 QPRINTK(qdev, DRV, ERR, "Lost AEN broken!\n"); 353 netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
276 else { 354 else {
277 int i; 355 int i;
278 QPRINTK(qdev, DRV, ERR, "Lost AEN detected.\n"); 356 netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n");
279 for (i = 0; i < mbcp->out_count; i++) 357 for (i = 0; i < mbcp->out_count; i++)
280 QPRINTK(qdev, DRV, ERR, "mbox_out[%d] = 0x%.08x.\n", 358 netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n",
281 i, mbcp->mbox_out[i]); 359 i, mbcp->mbox_out[i]);
282 360
283 } 361 }
284 362
@@ -293,15 +371,15 @@ static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
293 371
294 status = ql_get_mb_sts(qdev, mbcp); 372 status = ql_get_mb_sts(qdev, mbcp);
295 if (status) { 373 if (status) {
296 QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n"); 374 netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n");
297 } else { 375 } else {
298 QPRINTK(qdev, DRV, ERR, "Firmware Revision = 0x%.08x.\n", 376 netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n",
299 mbcp->mbox_out[1]); 377 mbcp->mbox_out[1]);
300 qdev->fw_rev_id = mbcp->mbox_out[1]; 378 qdev->fw_rev_id = mbcp->mbox_out[1];
301 status = ql_cam_route_initialize(qdev); 379 status = ql_cam_route_initialize(qdev);
302 if (status) 380 if (status)
303 QPRINTK(qdev, IFUP, ERR, 381 netif_err(qdev, ifup, qdev->ndev,
304 "Failed to init CAM/Routing tables.\n"); 382 "Failed to init CAM/Routing tables.\n");
305 } 383 }
306} 384}
307 385
@@ -320,8 +398,8 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
320 mbcp->out_count = 1; 398 mbcp->out_count = 1;
321 status = ql_get_mb_sts(qdev, mbcp); 399 status = ql_get_mb_sts(qdev, mbcp);
322 if (status) { 400 if (status) {
323 QPRINTK(qdev, DRV, ERR, 401 netif_err(qdev, drv, qdev->ndev,
324 "Could not read MPI, resetting ASIC!\n"); 402 "Could not read MPI, resetting ASIC!\n");
325 ql_queue_asic_error(qdev); 403 ql_queue_asic_error(qdev);
326 goto end; 404 goto end;
327 } 405 }
@@ -410,15 +488,14 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
410 mbcp->mbox_out[0] = MB_CMD_STS_ERR; 488 mbcp->mbox_out[0] = MB_CMD_STS_ERR;
411 return status; 489 return status;
412 } 490 }
413 QPRINTK(qdev, DRV, ERR, 491 netif_err(qdev, drv, qdev->ndev,
414 "Firmware initialization failed.\n"); 492 "Firmware initialization failed.\n");
415 status = -EIO; 493 status = -EIO;
416 ql_queue_fw_error(qdev); 494 ql_queue_fw_error(qdev);
417 break; 495 break;
418 496
419 case AEN_SYS_ERR: 497 case AEN_SYS_ERR:
420 QPRINTK(qdev, DRV, ERR, 498 netif_err(qdev, drv, qdev->ndev, "System Error.\n");
421 "System Error.\n");
422 ql_queue_fw_error(qdev); 499 ql_queue_fw_error(qdev);
423 status = -EIO; 500 status = -EIO;
424 break; 501 break;
@@ -431,8 +508,8 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
431 /* Need to support AEN 8110 */ 508 /* Need to support AEN 8110 */
432 break; 509 break;
433 default: 510 default:
434 QPRINTK(qdev, DRV, ERR, 511 netif_err(qdev, drv, qdev->ndev,
435 "Unsupported AE %.08x.\n", mbcp->mbox_out[0]); 512 "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
436 /* Clear the MPI firmware status. */ 513 /* Clear the MPI firmware status. */
437 } 514 }
438end: 515end:
@@ -505,8 +582,8 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
505 goto done; 582 goto done;
506 } while (time_before(jiffies, count)); 583 } while (time_before(jiffies, count));
507 584
508 QPRINTK(qdev, DRV, ERR, 585 netif_err(qdev, drv, qdev->ndev,
509 "Timed out waiting for mailbox complete.\n"); 586 "Timed out waiting for mailbox complete.\n");
510 status = -ETIMEDOUT; 587 status = -ETIMEDOUT;
511 goto end; 588 goto end;
512 589
@@ -529,6 +606,22 @@ end:
529 return status; 606 return status;
530} 607}
531 608
609int ql_mb_sys_err(struct ql_adapter *qdev)
610{
611 struct mbox_params mbc;
612 struct mbox_params *mbcp = &mbc;
613 int status;
614
615 memset(mbcp, 0, sizeof(struct mbox_params));
616
617 mbcp->in_count = 1;
618 mbcp->out_count = 0;
619
620 mbcp->mbox_in[0] = MB_CMD_MAKE_SYS_ERR;
621
622 status = ql_mailbox_command(qdev, mbcp);
623 return status;
624}
532 625
533/* Get MPI firmware version. This will be used for 626/* Get MPI firmware version. This will be used for
534 * driver banner and for ethtool info. 627 * driver banner and for ethtool info.
@@ -552,8 +645,8 @@ int ql_mb_about_fw(struct ql_adapter *qdev)
552 return status; 645 return status;
553 646
554 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { 647 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
555 QPRINTK(qdev, DRV, ERR, 648 netif_err(qdev, drv, qdev->ndev,
556 "Failed about firmware command\n"); 649 "Failed about firmware command\n");
557 status = -EIO; 650 status = -EIO;
558 } 651 }
559 652
@@ -584,8 +677,8 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
584 return status; 677 return status;
585 678
586 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { 679 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
587 QPRINTK(qdev, DRV, ERR, 680 netif_err(qdev, drv, qdev->ndev,
588 "Failed Get Firmware State.\n"); 681 "Failed Get Firmware State.\n");
589 status = -EIO; 682 status = -EIO;
590 } 683 }
591 684
@@ -594,8 +687,8 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
594 * happen. 687 * happen.
595 */ 688 */
596 if (mbcp->mbox_out[1] & 1) { 689 if (mbcp->mbox_out[1] & 1) {
597 QPRINTK(qdev, DRV, ERR, 690 netif_err(qdev, drv, qdev->ndev,
598 "Firmware waiting for initialization.\n"); 691 "Firmware waiting for initialization.\n");
599 status = -EIO; 692 status = -EIO;
600 } 693 }
601 694
@@ -627,8 +720,7 @@ int ql_mb_idc_ack(struct ql_adapter *qdev)
627 return status; 720 return status;
628 721
629 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { 722 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
630 QPRINTK(qdev, DRV, ERR, 723 netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n");
631 "Failed IDC ACK send.\n");
632 status = -EIO; 724 status = -EIO;
633 } 725 }
634 return status; 726 return status;
@@ -659,16 +751,72 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
659 return status; 751 return status;
660 752
661 if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) { 753 if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) {
662 QPRINTK(qdev, DRV, ERR, 754 netif_err(qdev, drv, qdev->ndev,
663 "Port Config sent, wait for IDC.\n"); 755 "Port Config sent, wait for IDC.\n");
664 } else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { 756 } else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
665 QPRINTK(qdev, DRV, ERR, 757 netif_err(qdev, drv, qdev->ndev,
666 "Failed Set Port Configuration.\n"); 758 "Failed Set Port Configuration.\n");
667 status = -EIO; 759 status = -EIO;
668 } 760 }
669 return status; 761 return status;
670} 762}
671 763
764int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
765 u32 size)
766{
767 int status = 0;
768 struct mbox_params mbc;
769 struct mbox_params *mbcp = &mbc;
770
771 memset(mbcp, 0, sizeof(struct mbox_params));
772
773 mbcp->in_count = 9;
774 mbcp->out_count = 1;
775
776 mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM;
777 mbcp->mbox_in[1] = LSW(addr);
778 mbcp->mbox_in[2] = MSW(req_dma);
779 mbcp->mbox_in[3] = LSW(req_dma);
780 mbcp->mbox_in[4] = MSW(size);
781 mbcp->mbox_in[5] = LSW(size);
782 mbcp->mbox_in[6] = MSW(MSD(req_dma));
783 mbcp->mbox_in[7] = LSW(MSD(req_dma));
784 mbcp->mbox_in[8] = MSW(addr);
785
786
787 status = ql_mailbox_command(qdev, mbcp);
788 if (status)
789 return status;
790
791 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
792 netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n");
793 status = -EIO;
794 }
795 return status;
796}
797
798/* Issue a mailbox command to dump RISC RAM. */
799int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
800 u32 ram_addr, int word_count)
801{
802 int status;
803 char *my_buf;
804 dma_addr_t buf_dma;
805
806 my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
807 &buf_dma);
808 if (!my_buf)
809 return -EIO;
810
811 status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
812 if (!status)
813 memcpy(buf, my_buf, word_count * sizeof(u32));
814
815 pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
816 buf_dma);
817 return status;
818}
819
672/* Get link settings and maximum frame size settings 820/* Get link settings and maximum frame size settings
673 * for the current port. 821 * for the current port.
674 * Most likely will block. 822 * Most likely will block.
@@ -691,12 +839,12 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev)
691 return status; 839 return status;
692 840
693 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { 841 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
694 QPRINTK(qdev, DRV, ERR, 842 netif_err(qdev, drv, qdev->ndev,
695 "Failed Get Port Configuration.\n"); 843 "Failed Get Port Configuration.\n");
696 status = -EIO; 844 status = -EIO;
697 } else { 845 } else {
698 QPRINTK(qdev, DRV, DEBUG, 846 netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
699 "Passed Get Port Configuration.\n"); 847 "Passed Get Port Configuration.\n");
700 qdev->link_config = mbcp->mbox_out[1]; 848 qdev->link_config = mbcp->mbox_out[1];
701 qdev->max_frame_size = mbcp->mbox_out[2]; 849 qdev->max_frame_size = mbcp->mbox_out[2];
702 } 850 }
@@ -723,8 +871,7 @@ int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
723 return status; 871 return status;
724 872
725 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { 873 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
726 QPRINTK(qdev, DRV, ERR, 874 netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
727 "Failed to set WOL mode.\n");
728 status = -EIO; 875 status = -EIO;
729 } 876 }
730 return status; 877 return status;
@@ -766,8 +913,7 @@ int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
766 return status; 913 return status;
767 914
768 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { 915 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
769 QPRINTK(qdev, DRV, ERR, 916 netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
770 "Failed to set WOL mode.\n");
771 status = -EIO; 917 status = -EIO;
772 } 918 }
773 return status; 919 return status;
@@ -793,8 +939,7 @@ static int ql_idc_wait(struct ql_adapter *qdev)
793 wait_for_completion_timeout(&qdev->ide_completion, 939 wait_for_completion_timeout(&qdev->ide_completion,
794 wait_time); 940 wait_time);
795 if (!wait_time) { 941 if (!wait_time) {
796 QPRINTK(qdev, DRV, ERR, 942 netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
797 "IDC Timeout.\n");
798 break; 943 break;
799 } 944 }
800 /* Now examine the response from the IDC process. 945 /* Now examine the response from the IDC process.
@@ -802,18 +947,17 @@ static int ql_idc_wait(struct ql_adapter *qdev)
802 * more wait time. 947 * more wait time.
803 */ 948 */
804 if (mbcp->mbox_out[0] == AEN_IDC_EXT) { 949 if (mbcp->mbox_out[0] == AEN_IDC_EXT) {
805 QPRINTK(qdev, DRV, ERR, 950 netif_err(qdev, drv, qdev->ndev,
806 "IDC Time Extension from function.\n"); 951 "IDC Time Extension from function.\n");
807 wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f; 952 wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f;
808 } else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) { 953 } else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) {
809 QPRINTK(qdev, DRV, ERR, 954 netif_err(qdev, drv, qdev->ndev, "IDC Success.\n");
810 "IDC Success.\n");
811 status = 0; 955 status = 0;
812 break; 956 break;
813 } else { 957 } else {
814 QPRINTK(qdev, DRV, ERR, 958 netif_err(qdev, drv, qdev->ndev,
815 "IDC: Invalid State 0x%.04x.\n", 959 "IDC: Invalid State 0x%.04x.\n",
816 mbcp->mbox_out[0]); 960 mbcp->mbox_out[0]);
817 status = -EIO; 961 status = -EIO;
818 break; 962 break;
819 } 963 }
@@ -842,8 +986,8 @@ int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
842 return status; 986 return status;
843 987
844 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { 988 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
845 QPRINTK(qdev, DRV, ERR, 989 netif_err(qdev, drv, qdev->ndev,
846 "Failed to set LED Configuration.\n"); 990 "Failed to set LED Configuration.\n");
847 status = -EIO; 991 status = -EIO;
848 } 992 }
849 993
@@ -868,8 +1012,8 @@ int ql_mb_get_led_cfg(struct ql_adapter *qdev)
868 return status; 1012 return status;
869 1013
870 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { 1014 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
871 QPRINTK(qdev, DRV, ERR, 1015 netif_err(qdev, drv, qdev->ndev,
872 "Failed to get LED Configuration.\n"); 1016 "Failed to get LED Configuration.\n");
873 status = -EIO; 1017 status = -EIO;
874 } else 1018 } else
875 qdev->led_config = mbcp->mbox_out[1]; 1019 qdev->led_config = mbcp->mbox_out[1];
@@ -899,16 +1043,16 @@ int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
899 return status; 1043 return status;
900 1044
901 if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { 1045 if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
902 QPRINTK(qdev, DRV, ERR, 1046 netif_err(qdev, drv, qdev->ndev,
903 "Command not supported by firmware.\n"); 1047 "Command not supported by firmware.\n");
904 status = -EINVAL; 1048 status = -EINVAL;
905 } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { 1049 } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
906 /* This indicates that the firmware is 1050 /* This indicates that the firmware is
907 * already in the state we are trying to 1051 * already in the state we are trying to
908 * change it to. 1052 * change it to.
909 */ 1053 */
910 QPRINTK(qdev, DRV, ERR, 1054 netif_err(qdev, drv, qdev->ndev,
911 "Command parameters make no change.\n"); 1055 "Command parameters make no change.\n");
912 } 1056 }
913 return status; 1057 return status;
914} 1058}
@@ -938,12 +1082,12 @@ static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
938 } 1082 }
939 1083
940 if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { 1084 if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
941 QPRINTK(qdev, DRV, ERR, 1085 netif_err(qdev, drv, qdev->ndev,
942 "Command not supported by firmware.\n"); 1086 "Command not supported by firmware.\n");
943 status = -EINVAL; 1087 status = -EINVAL;
944 } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { 1088 } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
945 QPRINTK(qdev, DRV, ERR, 1089 netif_err(qdev, drv, qdev->ndev,
946 "Failed to get MPI traffic control.\n"); 1090 "Failed to get MPI traffic control.\n");
947 status = -EIO; 1091 status = -EIO;
948 } 1092 }
949 return status; 1093 return status;
@@ -999,8 +1143,8 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
999 status = ql_mb_get_port_cfg(qdev); 1143 status = ql_mb_get_port_cfg(qdev);
1000 rtnl_unlock(); 1144 rtnl_unlock();
1001 if (status) { 1145 if (status) {
1002 QPRINTK(qdev, DRV, ERR, 1146 netif_err(qdev, drv, qdev->ndev,
1003 "Bug: Failed to get port config data.\n"); 1147 "Bug: Failed to get port config data.\n");
1004 goto err; 1148 goto err;
1005 } 1149 }
1006 1150
@@ -1013,8 +1157,8 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
1013 qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE; 1157 qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
1014 status = ql_set_port_cfg(qdev); 1158 status = ql_set_port_cfg(qdev);
1015 if (status) { 1159 if (status) {
1016 QPRINTK(qdev, DRV, ERR, 1160 netif_err(qdev, drv, qdev->ndev,
1017 "Bug: Failed to set port config data.\n"); 1161 "Bug: Failed to set port config data.\n");
1018 goto err; 1162 goto err;
1019 } 1163 }
1020end: 1164end:
@@ -1046,8 +1190,8 @@ void ql_mpi_idc_work(struct work_struct *work)
1046 1190
1047 switch (aen) { 1191 switch (aen) {
1048 default: 1192 default:
1049 QPRINTK(qdev, DRV, ERR, 1193 netif_err(qdev, drv, qdev->ndev,
1050 "Bug: Unhandled IDC action.\n"); 1194 "Bug: Unhandled IDC action.\n");
1051 break; 1195 break;
1052 case MB_CMD_PORT_RESET: 1196 case MB_CMD_PORT_RESET:
1053 case MB_CMD_STOP_FW: 1197 case MB_CMD_STOP_FW:
@@ -1062,11 +1206,11 @@ void ql_mpi_idc_work(struct work_struct *work)
1062 if (timeout) { 1206 if (timeout) {
1063 status = ql_mb_idc_ack(qdev); 1207 status = ql_mb_idc_ack(qdev);
1064 if (status) 1208 if (status)
1065 QPRINTK(qdev, DRV, ERR, 1209 netif_err(qdev, drv, qdev->ndev,
1066 "Bug: No pending IDC!\n"); 1210 "Bug: No pending IDC!\n");
1067 } else { 1211 } else {
1068 QPRINTK(qdev, DRV, DEBUG, 1212 netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
1069 "IDC ACK not required\n"); 1213 "IDC ACK not required\n");
1070 status = 0; /* success */ 1214 status = 0; /* success */
1071 } 1215 }
1072 break; 1216 break;
@@ -1095,11 +1239,11 @@ void ql_mpi_idc_work(struct work_struct *work)
1095 if (timeout) { 1239 if (timeout) {
1096 status = ql_mb_idc_ack(qdev); 1240 status = ql_mb_idc_ack(qdev);
1097 if (status) 1241 if (status)
1098 QPRINTK(qdev, DRV, ERR, 1242 netif_err(qdev, drv, qdev->ndev,
1099 "Bug: No pending IDC!\n"); 1243 "Bug: No pending IDC!\n");
1100 } else { 1244 } else {
1101 QPRINTK(qdev, DRV, DEBUG, 1245 netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
1102 "IDC ACK not required\n"); 1246 "IDC ACK not required\n");
1103 status = 0; /* success */ 1247 status = 0; /* success */
1104 } 1248 }
1105 break; 1249 break;
@@ -1143,5 +1287,19 @@ void ql_mpi_reset_work(struct work_struct *work)
1143 cancel_delayed_work_sync(&qdev->mpi_work); 1287 cancel_delayed_work_sync(&qdev->mpi_work);
1144 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); 1288 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
1145 cancel_delayed_work_sync(&qdev->mpi_idc_work); 1289 cancel_delayed_work_sync(&qdev->mpi_idc_work);
1290 /* If we're not the dominant NIC function,
1291 * then there is nothing to do.
1292 */
1293 if (!ql_own_firmware(qdev)) {
1294 netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
1295 return;
1296 }
1297
1298 if (!ql_core_dump(qdev, qdev->mpi_coredump)) {
1299 netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
1300 qdev->core_is_dumped = 1;
1301 queue_delayed_work(qdev->workqueue,
1302 &qdev->mpi_core_to_log, 5 * HZ);
1303 }
1146 ql_soft_reset_mpi_risc(qdev); 1304 ql_soft_reset_mpi_risc(qdev);
1147} 1305}
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index f03e2e4a15a8..15d5373dc8f3 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -938,7 +938,7 @@ static void r6040_multicast_list(struct net_device *dev)
938 u16 *adrp; 938 u16 *adrp;
939 u16 reg; 939 u16 reg;
940 unsigned long flags; 940 unsigned long flags;
941 struct dev_mc_list *dmi = dev->mc_list; 941 struct dev_mc_list *dmi;
942 int i; 942 int i;
943 943
944 /* MAC Address */ 944 /* MAC Address */
@@ -958,25 +958,24 @@ static void r6040_multicast_list(struct net_device *dev)
958 } 958 }
959 /* Too many multicast addresses 959 /* Too many multicast addresses
960 * accept all traffic */ 960 * accept all traffic */
961 else if ((dev->mc_count > MCAST_MAX) || (dev->flags & IFF_ALLMULTI)) 961 else if ((netdev_mc_count(dev) > MCAST_MAX) ||
962 (dev->flags & IFF_ALLMULTI))
962 reg |= 0x0020; 963 reg |= 0x0020;
963 964
964 iowrite16(reg, ioaddr); 965 iowrite16(reg, ioaddr);
965 spin_unlock_irqrestore(&lp->lock, flags); 966 spin_unlock_irqrestore(&lp->lock, flags);
966 967
967 /* Build the hash table */ 968 /* Build the hash table */
968 if (dev->mc_count > MCAST_MAX) { 969 if (netdev_mc_count(dev) > MCAST_MAX) {
969 u16 hash_table[4]; 970 u16 hash_table[4];
970 u32 crc; 971 u32 crc;
971 972
972 for (i = 0; i < 4; i++) 973 for (i = 0; i < 4; i++)
973 hash_table[i] = 0; 974 hash_table[i] = 0;
974 975
975 for (i = 0; i < dev->mc_count; i++) { 976 netdev_for_each_mc_addr(dmi, dev) {
976 char *addrs = dmi->dmi_addr; 977 char *addrs = dmi->dmi_addr;
977 978
978 dmi = dmi->next;
979
980 if (!(*addrs & 1)) 979 if (!(*addrs & 1))
981 continue; 980 continue;
982 981
@@ -994,17 +993,19 @@ static void r6040_multicast_list(struct net_device *dev)
994 iowrite16(hash_table[3], ioaddr + MAR3); 993 iowrite16(hash_table[3], ioaddr + MAR3);
995 } 994 }
996 /* Multicast Address 1~4 case */ 995 /* Multicast Address 1~4 case */
997 for (i = 0, dmi; (i < dev->mc_count) && (i < MCAST_MAX); i++) { 996 i = 0;
998 adrp = (u16 *)dmi->dmi_addr; 997 netdev_for_each_mc_addr(dmi, dev) {
999 iowrite16(adrp[0], ioaddr + MID_1L + 8*i); 998 if (i < MCAST_MAX) {
1000 iowrite16(adrp[1], ioaddr + MID_1M + 8*i); 999 adrp = (u16 *) dmi->dmi_addr;
1001 iowrite16(adrp[2], ioaddr + MID_1H + 8*i); 1000 iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
1002 dmi = dmi->next; 1001 iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
1003 } 1002 iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
1004 for (i = dev->mc_count; i < MCAST_MAX; i++) { 1003 } else {
1005 iowrite16(0xffff, ioaddr + MID_0L + 8*i); 1004 iowrite16(0xffff, ioaddr + MID_0L + 8 * i);
1006 iowrite16(0xffff, ioaddr + MID_0M + 8*i); 1005 iowrite16(0xffff, ioaddr + MID_0M + 8 * i);
1007 iowrite16(0xffff, ioaddr + MID_0H + 8*i); 1006 iowrite16(0xffff, ioaddr + MID_0H + 8 * i);
1007 }
1008 i++;
1008 } 1009 }
1009} 1010}
1010 1011
@@ -1222,7 +1223,7 @@ static void __devexit r6040_remove_one(struct pci_dev *pdev)
1222} 1223}
1223 1224
1224 1225
1225static struct pci_device_id r6040_pci_tbl[] = { 1226static DEFINE_PCI_DEVICE_TABLE(r6040_pci_tbl) = {
1226 { PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) }, 1227 { PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) },
1227 { 0 } 1228 { 0 }
1228}; 1229};
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 60f96c468a24..dfc3573c91bb 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -168,7 +168,7 @@ static void rtl_hw_start_8169(struct net_device *);
168static void rtl_hw_start_8168(struct net_device *); 168static void rtl_hw_start_8168(struct net_device *);
169static void rtl_hw_start_8101(struct net_device *); 169static void rtl_hw_start_8101(struct net_device *);
170 170
171static struct pci_device_id rtl8169_pci_tbl[] = { 171static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
172 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, 172 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
173 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, 173 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
174 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, 174 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
@@ -187,7 +187,7 @@ static struct pci_device_id rtl8169_pci_tbl[] = {
187MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); 187MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
188 188
189static int rx_copybreak = 200; 189static int rx_copybreak = 200;
190static int use_dac; 190static int use_dac = -1;
191static struct { 191static struct {
192 u32 msg_enable; 192 u32 msg_enable;
193} debug = { -1 }; 193} debug = { -1 };
@@ -511,7 +511,8 @@ MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
511module_param(rx_copybreak, int, 0); 511module_param(rx_copybreak, int, 0);
512MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); 512MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
513module_param(use_dac, int, 0); 513module_param(use_dac, int, 0);
514MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); 514MODULE_PARM_DESC(use_dac, "Enable PCI DAC. -1 defaults on for PCI Express only."
515" Unsafe on 32 bit PCI slot.");
515module_param_named(debug, debug.msg_enable, int, 0); 516module_param_named(debug, debug.msg_enable, int, 0);
516MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); 517MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
517MODULE_LICENSE("GPL"); 518MODULE_LICENSE("GPL");
@@ -744,12 +745,10 @@ static void rtl8169_check_link_status(struct net_device *dev,
744 spin_lock_irqsave(&tp->lock, flags); 745 spin_lock_irqsave(&tp->lock, flags);
745 if (tp->link_ok(ioaddr)) { 746 if (tp->link_ok(ioaddr)) {
746 netif_carrier_on(dev); 747 netif_carrier_on(dev);
747 if (netif_msg_ifup(tp)) 748 netif_info(tp, ifup, dev, "link up\n");
748 printk(KERN_INFO PFX "%s: link up\n", dev->name);
749 } else { 749 } else {
750 if (netif_msg_ifdown(tp))
751 printk(KERN_INFO PFX "%s: link down\n", dev->name);
752 netif_carrier_off(dev); 750 netif_carrier_off(dev);
751 netif_info(tp, ifdown, dev, "link down\n");
753 } 752 }
754 spin_unlock_irqrestore(&tp->lock, flags); 753 spin_unlock_irqrestore(&tp->lock, flags);
755} 754}
@@ -862,11 +861,8 @@ static int rtl8169_set_speed_tbi(struct net_device *dev,
862 } else if (autoneg == AUTONEG_ENABLE) 861 } else if (autoneg == AUTONEG_ENABLE)
863 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart); 862 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
864 else { 863 else {
865 if (netif_msg_link(tp)) { 864 netif_warn(tp, link, dev,
866 printk(KERN_WARNING "%s: " 865 "incorrect speed setting refused in TBI mode\n");
867 "incorrect speed setting refused in TBI mode\n",
868 dev->name);
869 }
870 ret = -EOPNOTSUPP; 866 ret = -EOPNOTSUPP;
871 } 867 }
872 868
@@ -901,9 +897,9 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
901 (tp->mac_version != RTL_GIGA_MAC_VER_15) && 897 (tp->mac_version != RTL_GIGA_MAC_VER_15) &&
902 (tp->mac_version != RTL_GIGA_MAC_VER_16)) { 898 (tp->mac_version != RTL_GIGA_MAC_VER_16)) {
903 giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF; 899 giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
904 } else if (netif_msg_link(tp)) { 900 } else {
905 printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n", 901 netif_info(tp, link, dev,
906 dev->name); 902 "PHY does not support 1000Mbps\n");
907 } 903 }
908 904
909 bmcr = BMCR_ANENABLE | BMCR_ANRESTART; 905 bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
@@ -2705,8 +2701,7 @@ static void rtl8169_phy_timer(unsigned long __opaque)
2705 if (tp->link_ok(ioaddr)) 2701 if (tp->link_ok(ioaddr))
2706 goto out_unlock; 2702 goto out_unlock;
2707 2703
2708 if (netif_msg_link(tp)) 2704 netif_warn(tp, link, dev, "PHY reset until link up\n");
2709 printk(KERN_WARNING "%s: PHY reset until link up\n", dev->name);
2710 2705
2711 tp->phy_reset_enable(ioaddr); 2706 tp->phy_reset_enable(ioaddr);
2712 2707
@@ -2776,8 +2771,7 @@ static void rtl8169_phy_reset(struct net_device *dev,
2776 return; 2771 return;
2777 msleep(1); 2772 msleep(1);
2778 } 2773 }
2779 if (netif_msg_link(tp)) 2774 netif_err(tp, link, dev, "PHY reset failed\n");
2780 printk(KERN_ERR "%s: PHY reset failed.\n", dev->name);
2781} 2775}
2782 2776
2783static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) 2777static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
@@ -2811,8 +2805,8 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
2811 */ 2805 */
2812 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL); 2806 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL);
2813 2807
2814 if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp)) 2808 if (RTL_R8(PHYstatus) & TBI_Enable)
2815 printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name); 2809 netif_info(tp, link, dev, "TBI auto-negotiating\n");
2816} 2810}
2817 2811
2818static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) 2812static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
@@ -2980,6 +2974,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2980 void __iomem *ioaddr; 2974 void __iomem *ioaddr;
2981 unsigned int i; 2975 unsigned int i;
2982 int rc; 2976 int rc;
2977 int this_use_dac = use_dac;
2983 2978
2984 if (netif_msg_drv(&debug)) { 2979 if (netif_msg_drv(&debug)) {
2985 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", 2980 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
@@ -3012,8 +3007,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3012 /* enable device (incl. PCI PM wakeup and hotplug setup) */ 3007 /* enable device (incl. PCI PM wakeup and hotplug setup) */
3013 rc = pci_enable_device(pdev); 3008 rc = pci_enable_device(pdev);
3014 if (rc < 0) { 3009 if (rc < 0) {
3015 if (netif_msg_probe(tp)) 3010 netif_err(tp, probe, dev, "enable failure\n");
3016 dev_err(&pdev->dev, "enable failure\n");
3017 goto err_out_free_dev_1; 3011 goto err_out_free_dev_1;
3018 } 3012 }
3019 3013
@@ -3023,45 +3017,46 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3023 3017
3024 /* make sure PCI base addr 1 is MMIO */ 3018 /* make sure PCI base addr 1 is MMIO */
3025 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) { 3019 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
3026 if (netif_msg_probe(tp)) { 3020 netif_err(tp, probe, dev,
3027 dev_err(&pdev->dev, 3021 "region #%d not an MMIO resource, aborting\n",
3028 "region #%d not an MMIO resource, aborting\n", 3022 region);
3029 region);
3030 }
3031 rc = -ENODEV; 3023 rc = -ENODEV;
3032 goto err_out_mwi_3; 3024 goto err_out_mwi_3;
3033 } 3025 }
3034 3026
3035 /* check for weird/broken PCI region reporting */ 3027 /* check for weird/broken PCI region reporting */
3036 if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { 3028 if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
3037 if (netif_msg_probe(tp)) { 3029 netif_err(tp, probe, dev,
3038 dev_err(&pdev->dev, 3030 "Invalid PCI region size(s), aborting\n");
3039 "Invalid PCI region size(s), aborting\n");
3040 }
3041 rc = -ENODEV; 3031 rc = -ENODEV;
3042 goto err_out_mwi_3; 3032 goto err_out_mwi_3;
3043 } 3033 }
3044 3034
3045 rc = pci_request_regions(pdev, MODULENAME); 3035 rc = pci_request_regions(pdev, MODULENAME);
3046 if (rc < 0) { 3036 if (rc < 0) {
3047 if (netif_msg_probe(tp)) 3037 netif_err(tp, probe, dev, "could not request regions\n");
3048 dev_err(&pdev->dev, "could not request regions.\n");
3049 goto err_out_mwi_3; 3038 goto err_out_mwi_3;
3050 } 3039 }
3051 3040
3052 tp->cp_cmd = PCIMulRW | RxChkSum; 3041 tp->cp_cmd = PCIMulRW | RxChkSum;
3053 3042
3043 tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3044 if (!tp->pcie_cap)
3045 netif_info(tp, probe, dev, "no PCI Express capability\n");
3046
3047 if (this_use_dac < 0)
3048 this_use_dac = tp->pcie_cap != 0;
3049
3054 if ((sizeof(dma_addr_t) > 4) && 3050 if ((sizeof(dma_addr_t) > 4) &&
3055 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { 3051 this_use_dac &&
3052 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3053 netif_info(tp, probe, dev, "using 64-bit DMA\n");
3056 tp->cp_cmd |= PCIDAC; 3054 tp->cp_cmd |= PCIDAC;
3057 dev->features |= NETIF_F_HIGHDMA; 3055 dev->features |= NETIF_F_HIGHDMA;
3058 } else { 3056 } else {
3059 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3057 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3060 if (rc < 0) { 3058 if (rc < 0) {
3061 if (netif_msg_probe(tp)) { 3059 netif_err(tp, probe, dev, "DMA configuration failed\n");
3062 dev_err(&pdev->dev,
3063 "DMA configuration failed.\n");
3064 }
3065 goto err_out_free_res_4; 3060 goto err_out_free_res_4;
3066 } 3061 }
3067 } 3062 }
@@ -3069,16 +3064,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3069 /* ioremap MMIO region */ 3064 /* ioremap MMIO region */
3070 ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE); 3065 ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
3071 if (!ioaddr) { 3066 if (!ioaddr) {
3072 if (netif_msg_probe(tp)) 3067 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
3073 dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
3074 rc = -EIO; 3068 rc = -EIO;
3075 goto err_out_free_res_4; 3069 goto err_out_free_res_4;
3076 } 3070 }
3077 3071
3078 tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3079 if (!tp->pcie_cap && netif_msg_probe(tp))
3080 dev_info(&pdev->dev, "no PCI Express capability\n");
3081
3082 RTL_W16(IntrMask, 0x0000); 3072 RTL_W16(IntrMask, 0x0000);
3083 3073
3084 /* Soft reset the chip. */ 3074 /* Soft reset the chip. */
@@ -3100,10 +3090,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3100 3090
3101 /* Use appropriate default if unknown */ 3091 /* Use appropriate default if unknown */
3102 if (tp->mac_version == RTL_GIGA_MAC_NONE) { 3092 if (tp->mac_version == RTL_GIGA_MAC_NONE) {
3103 if (netif_msg_probe(tp)) { 3093 netif_notice(tp, probe, dev,
3104 dev_notice(&pdev->dev, 3094 "unknown MAC, using family default\n");
3105 "unknown MAC, using family default\n");
3106 }
3107 tp->mac_version = cfg->default_ver; 3095 tp->mac_version = cfg->default_ver;
3108 } 3096 }
3109 3097
@@ -3185,19 +3173,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3185 3173
3186 pci_set_drvdata(pdev, dev); 3174 pci_set_drvdata(pdev, dev);
3187 3175
3188 if (netif_msg_probe(tp)) { 3176 netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n",
3189 u32 xid = RTL_R32(TxConfig) & 0x9cf0f8ff; 3177 rtl_chip_info[tp->chipset].name,
3190 3178 dev->base_addr, dev->dev_addr,
3191 printk(KERN_INFO "%s: %s at 0x%lx, " 3179 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
3192 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
3193 "XID %08x IRQ %d\n",
3194 dev->name,
3195 rtl_chip_info[tp->chipset].name,
3196 dev->base_addr,
3197 dev->dev_addr[0], dev->dev_addr[1],
3198 dev->dev_addr[2], dev->dev_addr[3],
3199 dev->dev_addr[4], dev->dev_addr[5], xid, dev->irq);
3200 }
3201 3180
3202 rtl8169_init_phy(dev, tp); 3181 rtl8169_init_phy(dev, tp);
3203 3182
@@ -4136,10 +4115,10 @@ static void rtl8169_reinit_task(struct work_struct *work)
4136 4115
4137 ret = rtl8169_open(dev); 4116 ret = rtl8169_open(dev);
4138 if (unlikely(ret < 0)) { 4117 if (unlikely(ret < 0)) {
4139 if (net_ratelimit() && netif_msg_drv(tp)) { 4118 if (net_ratelimit())
4140 printk(KERN_ERR PFX "%s: reinit failure (status = %d)." 4119 netif_err(tp, drv, dev,
4141 " Rescheduling.\n", dev->name, ret); 4120 "reinit failure (status = %d). Rescheduling\n",
4142 } 4121 ret);
4143 rtl8169_schedule_work(dev, rtl8169_reinit_task); 4122 rtl8169_schedule_work(dev, rtl8169_reinit_task);
4144 } 4123 }
4145 4124
@@ -4169,10 +4148,8 @@ static void rtl8169_reset_task(struct work_struct *work)
4169 netif_wake_queue(dev); 4148 netif_wake_queue(dev);
4170 rtl8169_check_link_status(dev, tp, tp->mmio_addr); 4149 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
4171 } else { 4150 } else {
4172 if (net_ratelimit() && netif_msg_intr(tp)) { 4151 if (net_ratelimit())
4173 printk(KERN_EMERG PFX "%s: Rx buffers shortage\n", 4152 netif_emerg(tp, intr, dev, "Rx buffers shortage\n");
4174 dev->name);
4175 }
4176 rtl8169_schedule_work(dev, rtl8169_reset_task); 4153 rtl8169_schedule_work(dev, rtl8169_reset_task);
4177 } 4154 }
4178 4155
@@ -4260,11 +4237,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
4260 u32 opts1; 4237 u32 opts1;
4261 4238
4262 if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) { 4239 if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
4263 if (netif_msg_drv(tp)) { 4240 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
4264 printk(KERN_ERR
4265 "%s: BUG! Tx Ring full when queue awake!\n",
4266 dev->name);
4267 }
4268 goto err_stop; 4241 goto err_stop;
4269 } 4242 }
4270 4243
@@ -4326,11 +4299,8 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
4326 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); 4299 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
4327 pci_read_config_word(pdev, PCI_STATUS, &pci_status); 4300 pci_read_config_word(pdev, PCI_STATUS, &pci_status);
4328 4301
4329 if (netif_msg_intr(tp)) { 4302 netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
4330 printk(KERN_ERR 4303 pci_cmd, pci_status);
4331 "%s: PCI error (cmd = 0x%04x, status = 0x%04x).\n",
4332 dev->name, pci_cmd, pci_status);
4333 }
4334 4304
4335 /* 4305 /*
4336 * The recovery sequence below admits a very elaborated explanation: 4306 * The recovery sequence below admits a very elaborated explanation:
@@ -4354,8 +4324,7 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
4354 4324
4355 /* The infamous DAC f*ckup only happens at boot time */ 4325 /* The infamous DAC f*ckup only happens at boot time */
4356 if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) { 4326 if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
4357 if (netif_msg_intr(tp)) 4327 netif_info(tp, intr, dev, "disabling PCI DAC\n");
4358 printk(KERN_INFO "%s: disabling PCI DAC.\n", dev->name);
4359 tp->cp_cmd &= ~PCIDAC; 4328 tp->cp_cmd &= ~PCIDAC;
4360 RTL_W16(CPlusCmd, tp->cp_cmd); 4329 RTL_W16(CPlusCmd, tp->cp_cmd);
4361 dev->features &= ~NETIF_F_HIGHDMA; 4330 dev->features &= ~NETIF_F_HIGHDMA;
@@ -4482,11 +4451,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4482 if (status & DescOwn) 4451 if (status & DescOwn)
4483 break; 4452 break;
4484 if (unlikely(status & RxRES)) { 4453 if (unlikely(status & RxRES)) {
4485 if (netif_msg_rx_err(tp)) { 4454 netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
4486 printk(KERN_INFO 4455 status);
4487 "%s: Rx ERROR. status = %08x\n",
4488 dev->name, status);
4489 }
4490 dev->stats.rx_errors++; 4456 dev->stats.rx_errors++;
4491 if (status & (RxRWT | RxRUNT)) 4457 if (status & (RxRWT | RxRUNT))
4492 dev->stats.rx_length_errors++; 4458 dev->stats.rx_length_errors++;
@@ -4549,8 +4515,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4549 tp->cur_rx = cur_rx; 4515 tp->cur_rx = cur_rx;
4550 4516
4551 delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx); 4517 delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
4552 if (!delta && count && netif_msg_intr(tp)) 4518 if (!delta && count)
4553 printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name); 4519 netif_info(tp, intr, dev, "no Rx buffer allocated\n");
4554 tp->dirty_rx += delta; 4520 tp->dirty_rx += delta;
4555 4521
4556 /* 4522 /*
@@ -4560,8 +4526,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4560 * after refill ? 4526 * after refill ?
4561 * - how do others driver handle this condition (Uh oh...). 4527 * - how do others driver handle this condition (Uh oh...).
4562 */ 4528 */
4563 if ((tp->dirty_rx + NUM_RX_DESC == tp->cur_rx) && netif_msg_intr(tp)) 4529 if (tp->dirty_rx + NUM_RX_DESC == tp->cur_rx)
4564 printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name); 4530 netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");
4565 4531
4566 return count; 4532 return count;
4567} 4533}
@@ -4616,10 +4582,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4616 4582
4617 if (likely(napi_schedule_prep(&tp->napi))) 4583 if (likely(napi_schedule_prep(&tp->napi)))
4618 __napi_schedule(&tp->napi); 4584 __napi_schedule(&tp->napi);
4619 else if (netif_msg_intr(tp)) { 4585 else
4620 printk(KERN_INFO "%s: interrupt %04x in poll\n", 4586 netif_info(tp, intr, dev,
4621 dev->name, status); 4587 "interrupt %04x in poll\n", status);
4622 }
4623 } 4588 }
4624 4589
4625 /* We only get a new MSI interrupt when all active irq 4590 /* We only get a new MSI interrupt when all active irq
@@ -4755,27 +4720,22 @@ static void rtl_set_rx_mode(struct net_device *dev)
4755 4720
4756 if (dev->flags & IFF_PROMISC) { 4721 if (dev->flags & IFF_PROMISC) {
4757 /* Unconditionally log net taps. */ 4722 /* Unconditionally log net taps. */
4758 if (netif_msg_link(tp)) { 4723 netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
4759 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
4760 dev->name);
4761 }
4762 rx_mode = 4724 rx_mode =
4763 AcceptBroadcast | AcceptMulticast | AcceptMyPhys | 4725 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
4764 AcceptAllPhys; 4726 AcceptAllPhys;
4765 mc_filter[1] = mc_filter[0] = 0xffffffff; 4727 mc_filter[1] = mc_filter[0] = 0xffffffff;
4766 } else if ((dev->mc_count > multicast_filter_limit) || 4728 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
4767 (dev->flags & IFF_ALLMULTI)) { 4729 (dev->flags & IFF_ALLMULTI)) {
4768 /* Too many to filter perfectly -- accept all multicasts. */ 4730 /* Too many to filter perfectly -- accept all multicasts. */
4769 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 4731 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
4770 mc_filter[1] = mc_filter[0] = 0xffffffff; 4732 mc_filter[1] = mc_filter[0] = 0xffffffff;
4771 } else { 4733 } else {
4772 struct dev_mc_list *mclist; 4734 struct dev_mc_list *mclist;
4773 unsigned int i;
4774 4735
4775 rx_mode = AcceptBroadcast | AcceptMyPhys; 4736 rx_mode = AcceptBroadcast | AcceptMyPhys;
4776 mc_filter[1] = mc_filter[0] = 0; 4737 mc_filter[1] = mc_filter[0] = 0;
4777 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 4738 netdev_for_each_mc_addr(mclist, dev) {
4778 i++, mclist = mclist->next) {
4779 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 4739 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
4780 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 4740 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
4781 rx_mode |= AcceptMulticast; 4741 rx_mode |= AcceptMulticast;
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index 1c257098d0a6..266baf534964 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1688,7 +1688,7 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1688 } 1688 }
1689} 1689}
1690 1690
1691static struct pci_device_id rr_pci_tbl[] = { 1691static DEFINE_PCI_DEVICE_TABLE(rr_pci_tbl) = {
1692 { PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER, 1692 { PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
1693 PCI_ANY_ID, PCI_ANY_ID, }, 1693 PCI_ANY_ID, PCI_ANY_ID, },
1694 { 0,} 1694 { 0,}
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 3c4836d0898f..43bc66aa8405 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -523,7 +523,7 @@ module_param_array(rts_frm_len, uint, NULL, 0);
523 * S2IO device table. 523 * S2IO device table.
524 * This table lists all the devices that this driver supports. 524 * This table lists all the devices that this driver supports.
525 */ 525 */
526static struct pci_device_id s2io_tbl[] __devinitdata = { 526static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = {
527 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN, 527 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
528 PCI_ANY_ID, PCI_ANY_ID}, 528 PCI_ANY_ID, PCI_ANY_ID},
529 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI, 529 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
@@ -5055,8 +5055,8 @@ static void s2io_set_multicast(struct net_device *dev)
5055 } 5055 }
5056 5056
5057 /* Update individual M_CAST address list */ 5057 /* Update individual M_CAST address list */
5058 if ((!sp->m_cast_flg) && dev->mc_count) { 5058 if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
5059 if (dev->mc_count > 5059 if (netdev_mc_count(dev) >
5060 (config->max_mc_addr - config->max_mac_addr)) { 5060 (config->max_mc_addr - config->max_mac_addr)) {
5061 DBG_PRINT(ERR_DBG, 5061 DBG_PRINT(ERR_DBG,
5062 "%s: No more Rx filters can be added - " 5062 "%s: No more Rx filters can be added - "
@@ -5066,7 +5066,7 @@ static void s2io_set_multicast(struct net_device *dev)
5066 } 5066 }
5067 5067
5068 prev_cnt = sp->mc_addr_count; 5068 prev_cnt = sp->mc_addr_count;
5069 sp->mc_addr_count = dev->mc_count; 5069 sp->mc_addr_count = netdev_mc_count(dev);
5070 5070
5071 /* Clear out the previous list of Mc in the H/W. */ 5071 /* Clear out the previous list of Mc in the H/W. */
5072 for (i = 0; i < prev_cnt; i++) { 5072 for (i = 0; i < prev_cnt; i++) {
@@ -5092,8 +5092,8 @@ static void s2io_set_multicast(struct net_device *dev)
5092 } 5092 }
5093 5093
5094 /* Create the new Rx filter list and update the same in H/W. */ 5094 /* Create the new Rx filter list and update the same in H/W. */
5095 for (i = 0, mclist = dev->mc_list; i < dev->mc_count; 5095 i = 0;
5096 i++, mclist = mclist->next) { 5096 netdev_for_each_mc_addr(mclist, dev) {
5097 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr, 5097 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
5098 ETH_ALEN); 5098 ETH_ALEN);
5099 mac_addr = 0; 5099 mac_addr = 0;
@@ -5121,6 +5121,7 @@ static void s2io_set_multicast(struct net_device *dev)
5121 dev->name); 5121 dev->name);
5122 return; 5122 return;
5123 } 5123 }
5124 i++;
5124 } 5125 }
5125 } 5126 }
5126} 5127}
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 564d4d7f855b..9944e5d662c0 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2161,13 +2161,13 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
2161 * XXX if the table overflows */ 2161 * XXX if the table overflows */
2162 2162
2163 idx = 1; /* skip station address */ 2163 idx = 1; /* skip station address */
2164 mclist = dev->mc_list; 2164 netdev_for_each_mc_addr(mclist, dev) {
2165 while (mclist && (idx < MAC_ADDR_COUNT)) { 2165 if (idx == MAC_ADDR_COUNT)
2166 break;
2166 reg = sbmac_addr2reg(mclist->dmi_addr); 2167 reg = sbmac_addr2reg(mclist->dmi_addr);
2167 port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t)); 2168 port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
2168 __raw_writeq(reg, port); 2169 __raw_writeq(reg, port);
2169 idx++; 2170 idx++;
2170 mclist = mclist->next;
2171 } 2171 }
2172 2172
2173 /* 2173 /*
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index e35050322f97..d87c4787fffa 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -429,13 +429,13 @@ static void _sc92031_set_mar(struct net_device *dev)
429 u32 mar0 = 0, mar1 = 0; 429 u32 mar0 = 0, mar1 = 0;
430 430
431 if ((dev->flags & IFF_PROMISC) || 431 if ((dev->flags & IFF_PROMISC) ||
432 dev->mc_count > multicast_filter_limit || 432 netdev_mc_count(dev) > multicast_filter_limit ||
433 (dev->flags & IFF_ALLMULTI)) 433 (dev->flags & IFF_ALLMULTI))
434 mar0 = mar1 = 0xffffffff; 434 mar0 = mar1 = 0xffffffff;
435 else if (dev->flags & IFF_MULTICAST) { 435 else if (dev->flags & IFF_MULTICAST) {
436 struct dev_mc_list *mc_list; 436 struct dev_mc_list *mc_list;
437 437
438 for (mc_list = dev->mc_list; mc_list; mc_list = mc_list->next) { 438 netdev_for_each_mc_addr(mc_list, dev) {
439 u32 crc; 439 u32 crc;
440 unsigned bit = 0; 440 unsigned bit = 0;
441 441
@@ -1589,7 +1589,7 @@ out:
1589 return 0; 1589 return 0;
1590} 1590}
1591 1591
1592static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = { 1592static DEFINE_PCI_DEVICE_TABLE(sc92031_pci_device_id_table) = {
1593 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) }, 1593 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) },
1594 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) }, 1594 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) },
1595 { PCI_DEVICE(0x1088, 0x2031) }, 1595 { PCI_DEVICE(0x1088, 0x2031) },
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 46997e177ee3..88f2fb193abe 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1602,11 +1602,10 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
1602static void efx_set_multicast_list(struct net_device *net_dev) 1602static void efx_set_multicast_list(struct net_device *net_dev)
1603{ 1603{
1604 struct efx_nic *efx = netdev_priv(net_dev); 1604 struct efx_nic *efx = netdev_priv(net_dev);
1605 struct dev_mc_list *mc_list = net_dev->mc_list; 1605 struct dev_mc_list *mc_list;
1606 union efx_multicast_hash *mc_hash = &efx->multicast_hash; 1606 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
1607 u32 crc; 1607 u32 crc;
1608 int bit; 1608 int bit;
1609 int i;
1610 1609
1611 efx->promiscuous = !!(net_dev->flags & IFF_PROMISC); 1610 efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
1612 1611
@@ -1615,11 +1614,10 @@ static void efx_set_multicast_list(struct net_device *net_dev)
1615 memset(mc_hash, 0xff, sizeof(*mc_hash)); 1614 memset(mc_hash, 0xff, sizeof(*mc_hash));
1616 } else { 1615 } else {
1617 memset(mc_hash, 0x00, sizeof(*mc_hash)); 1616 memset(mc_hash, 0x00, sizeof(*mc_hash));
1618 for (i = 0; i < net_dev->mc_count; i++) { 1617 netdev_for_each_mc_addr(mc_list, net_dev) {
1619 crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr); 1618 crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
1620 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); 1619 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
1621 set_bit_le(bit, mc_hash->byte); 1620 set_bit_le(bit, mc_hash->byte);
1622 mc_list = mc_list->next;
1623 } 1621 }
1624 1622
1625 /* Broadcast packets go through the multicast hash filter. 1623 /* Broadcast packets go through the multicast hash filter.
@@ -1940,7 +1938,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
1940 **************************************************************************/ 1938 **************************************************************************/
1941 1939
1942/* PCI device ID table */ 1940/* PCI device ID table */
1943static struct pci_device_id efx_pci_table[] __devinitdata = { 1941static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
1944 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID), 1942 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
1945 .driver_data = (unsigned long) &falcon_a1_nic_type}, 1943 .driver_data = (unsigned long) &falcon_a1_nic_type},
1946 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID), 1944 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index a615ac051530..7eff0a615cb3 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -79,8 +79,6 @@ extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
79 79
80/* Global */ 80/* Global */
81extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); 81extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
82extern void efx_suspend(struct efx_nic *efx);
83extern void efx_resume(struct efx_nic *efx);
84extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, 82extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs,
85 int rx_usecs, bool rx_adaptive); 83 int rx_usecs, bool rx_adaptive);
86extern int efx_request_power(struct efx_nic *efx, int mw, const char *name); 84extern int efx_request_power(struct efx_nic *efx, int mw, const char *name);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 6c0bbed8c477..d9f9c02a928e 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -196,7 +196,7 @@ int efx_ethtool_get_settings(struct net_device *net_dev,
196 efx->phy_op->get_settings(efx, ecmd); 196 efx->phy_op->get_settings(efx, ecmd);
197 mutex_unlock(&efx->mac_lock); 197 mutex_unlock(&efx->mac_lock);
198 198
199 /* Falcon GMAC does not support 1000Mbps HD */ 199 /* GMAC does not support 1000Mbps HD */
200 ecmd->supported &= ~SUPPORTED_1000baseT_Half; 200 ecmd->supported &= ~SUPPORTED_1000baseT_Half;
201 /* Both MACs support pause frames (bidirectional and respond-only) */ 201 /* Both MACs support pause frames (bidirectional and respond-only) */
202 ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; 202 ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
@@ -216,7 +216,7 @@ int efx_ethtool_set_settings(struct net_device *net_dev,
216 struct efx_nic *efx = netdev_priv(net_dev); 216 struct efx_nic *efx = netdev_priv(net_dev);
217 int rc; 217 int rc;
218 218
219 /* Falcon GMAC does not support 1000Mbps HD */ 219 /* GMAC does not support 1000Mbps HD */
220 if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) { 220 if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) {
221 EFX_LOG(efx, "rejecting unsupported 1000Mbps HD" 221 EFX_LOG(efx, "rejecting unsupported 1000Mbps HD"
222 " setting\n"); 222 " setting\n");
@@ -342,8 +342,8 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
342 unsigned int n = 0, i; 342 unsigned int n = 0, i;
343 enum efx_loopback_mode mode; 343 enum efx_loopback_mode mode;
344 344
345 efx_fill_test(n++, strings, data, &tests->mdio, 345 efx_fill_test(n++, strings, data, &tests->phy_alive,
346 "core", 0, "mdio", NULL); 346 "phy", 0, "alive", NULL);
347 efx_fill_test(n++, strings, data, &tests->nvram, 347 efx_fill_test(n++, strings, data, &tests->nvram,
348 "core", 0, "nvram", NULL); 348 "core", 0, "nvram", NULL);
349 efx_fill_test(n++, strings, data, &tests->interrupt, 349 efx_fill_test(n++, strings, data, &tests->interrupt,
@@ -379,7 +379,7 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
379 if (name == NULL) 379 if (name == NULL)
380 break; 380 break;
381 381
382 efx_fill_test(n++, strings, data, &tests->phy[i], 382 efx_fill_test(n++, strings, data, &tests->phy_ext[i],
383 "phy", 0, name, NULL); 383 "phy", 0, name, NULL);
384 } 384 }
385 } 385 }
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 9d009c46e962..1b8d83657aaa 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -909,6 +909,8 @@ static int falcon_probe_port(struct efx_nic *efx)
909 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; 909 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
910 else 910 else
911 efx->wanted_fc = EFX_FC_RX; 911 efx->wanted_fc = EFX_FC_RX;
912 if (efx->mdio.mmds & MDIO_DEVS_AN)
913 efx->wanted_fc |= EFX_FC_AUTO;
912 914
913 /* Allocate buffer for stats */ 915 /* Allocate buffer for stats */
914 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, 916 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
@@ -1006,7 +1008,7 @@ static int falcon_test_nvram(struct efx_nic *efx)
1006 1008
1007static const struct efx_nic_register_test falcon_b0_register_tests[] = { 1009static const struct efx_nic_register_test falcon_b0_register_tests[] = {
1008 { FR_AZ_ADR_REGION, 1010 { FR_AZ_ADR_REGION,
1009 EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) }, 1011 EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
1010 { FR_AZ_RX_CFG, 1012 { FR_AZ_RX_CFG,
1011 EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) }, 1013 EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
1012 { FR_AZ_TX_CFG, 1014 { FR_AZ_TX_CFG,
@@ -1728,7 +1730,7 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
1728 1730
1729/************************************************************************** 1731/**************************************************************************
1730 * 1732 *
1731 * Revision-dependent attributes used by efx.c 1733 * Revision-dependent attributes used by efx.c and nic.c
1732 * 1734 *
1733 ************************************************************************** 1735 **************************************************************************
1734 */ 1736 */
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index f66b3da6ddff..c48669c77414 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -896,29 +896,73 @@ fail:
896 return rc; 896 return rc;
897} 897}
898 898
899int efx_mcdi_handle_assertion(struct efx_nic *efx) 899static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
900{
901 u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN];
902 u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN];
903 int rc;
904
905 MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
906
907 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
908 outbuf, sizeof(outbuf), NULL);
909 if (rc)
910 return rc;
911
912 switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
913 case MC_CMD_NVRAM_TEST_PASS:
914 case MC_CMD_NVRAM_TEST_NOTSUPP:
915 return 0;
916 default:
917 return -EIO;
918 }
919}
920
921int efx_mcdi_nvram_test_all(struct efx_nic *efx)
922{
923 u32 nvram_types;
924 unsigned int type;
925 int rc;
926
927 rc = efx_mcdi_nvram_types(efx, &nvram_types);
928 if (rc)
929 return rc;
930
931 type = 0;
932 while (nvram_types != 0) {
933 if (nvram_types & 1) {
934 rc = efx_mcdi_nvram_test(efx, type);
935 if (rc)
936 return rc;
937 }
938 type++;
939 nvram_types >>= 1;
940 }
941
942 return 0;
943}
944
945static int efx_mcdi_read_assertion(struct efx_nic *efx)
900{ 946{
901 union { 947 u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN];
902 u8 asserts[MC_CMD_GET_ASSERTS_IN_LEN]; 948 u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN];
903 u8 reboot[MC_CMD_REBOOT_IN_LEN];
904 } inbuf;
905 u8 assertion[MC_CMD_GET_ASSERTS_OUT_LEN];
906 unsigned int flags, index, ofst; 949 unsigned int flags, index, ofst;
907 const char *reason; 950 const char *reason;
908 size_t outlen; 951 size_t outlen;
909 int retry; 952 int retry;
910 int rc; 953 int rc;
911 954
912 /* Check if the MC is in the assertion handler, retrying twice. Once 955 /* Attempt to read any stored assertion state before we reboot
956 * the mcfw out of the assertion handler. Retry twice, once
913 * because a boot-time assertion might cause this command to fail 957 * because a boot-time assertion might cause this command to fail
914 * with EINTR. And once again because GET_ASSERTS can race with 958 * with EINTR. And once again because GET_ASSERTS can race with
915 * MC_CMD_REBOOT running on the other port. */ 959 * MC_CMD_REBOOT running on the other port. */
916 retry = 2; 960 retry = 2;
917 do { 961 do {
918 MCDI_SET_DWORD(inbuf.asserts, GET_ASSERTS_IN_CLEAR, 0); 962 MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
919 rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS, 963 rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
920 inbuf.asserts, MC_CMD_GET_ASSERTS_IN_LEN, 964 inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
921 assertion, sizeof(assertion), &outlen); 965 outbuf, sizeof(outbuf), &outlen);
922 } while ((rc == -EINTR || rc == -EIO) && retry-- > 0); 966 } while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
923 967
924 if (rc) 968 if (rc)
@@ -926,21 +970,11 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
926 if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) 970 if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
927 return -EINVAL; 971 return -EINVAL;
928 972
929 flags = MCDI_DWORD(assertion, GET_ASSERTS_OUT_GLOBAL_FLAGS); 973 /* Print out any recorded assertion state */
974 flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
930 if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) 975 if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
931 return 0; 976 return 0;
932 977
933 /* Reset the hardware atomically such that only one port with succeed.
934 * This command will succeed if a reboot is no longer required (because
935 * the other port did it first), but fail with EIO if it succeeds.
936 */
937 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
938 MCDI_SET_DWORD(inbuf.reboot, REBOOT_IN_FLAGS,
939 MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
940 efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf.reboot, MC_CMD_REBOOT_IN_LEN,
941 NULL, 0, NULL);
942
943 /* Print out the assertion */
944 reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) 978 reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
945 ? "system-level assertion" 979 ? "system-level assertion"
946 : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) 980 : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
@@ -949,20 +983,45 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
949 ? "watchdog reset" 983 ? "watchdog reset"
950 : "unknown assertion"; 984 : "unknown assertion";
951 EFX_ERR(efx, "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, 985 EFX_ERR(efx, "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
952 MCDI_DWORD(assertion, GET_ASSERTS_OUT_SAVED_PC_OFFS), 986 MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
953 MCDI_DWORD(assertion, GET_ASSERTS_OUT_THREAD_OFFS)); 987 MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
954 988
955 /* Print out the registers */ 989 /* Print out the registers */
956 ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; 990 ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
957 for (index = 1; index < 32; index++) { 991 for (index = 1; index < 32; index++) {
958 EFX_ERR(efx, "R%.2d (?): 0x%.8x\n", index, 992 EFX_ERR(efx, "R%.2d (?): 0x%.8x\n", index,
959 MCDI_DWORD2(assertion, ofst)); 993 MCDI_DWORD2(outbuf, ofst));
960 ofst += sizeof(efx_dword_t); 994 ofst += sizeof(efx_dword_t);
961 } 995 }
962 996
963 return 0; 997 return 0;
964} 998}
965 999
1000static void efx_mcdi_exit_assertion(struct efx_nic *efx)
1001{
1002 u8 inbuf[MC_CMD_REBOOT_IN_LEN];
1003
1004 /* Atomically reboot the mcfw out of the assertion handler */
1005 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
1006 MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
1007 MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
1008 efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
1009 NULL, 0, NULL);
1010}
1011
1012int efx_mcdi_handle_assertion(struct efx_nic *efx)
1013{
1014 int rc;
1015
1016 rc = efx_mcdi_read_assertion(efx);
1017 if (rc)
1018 return rc;
1019
1020 efx_mcdi_exit_assertion(efx);
1021
1022 return 0;
1023}
1024
966void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) 1025void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
967{ 1026{
968 u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN]; 1027 u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN];
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h
index 10ce98f4c0fb..f1f89ad4075a 100644
--- a/drivers/net/sfc/mcdi.h
+++ b/drivers/net/sfc/mcdi.h
@@ -116,6 +116,7 @@ extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
116 loff_t offset, size_t length); 116 loff_t offset, size_t length);
117extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx, 117extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
118 unsigned int type); 118 unsigned int type);
119extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
119extern int efx_mcdi_handle_assertion(struct efx_nic *efx); 120extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
120extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); 121extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
121extern int efx_mcdi_reset_port(struct efx_nic *efx); 122extern int efx_mcdi_reset_port(struct efx_nic *efx);
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index 73e71f420624..bd59302695b3 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -786,16 +786,18 @@
786#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0 786#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
787#define MC_CMD_GET_PHY_CFG_PRESENT_LBN 0 787#define MC_CMD_GET_PHY_CFG_PRESENT_LBN 0
788#define MC_CMD_GET_PHY_CFG_PRESENT_WIDTH 1 788#define MC_CMD_GET_PHY_CFG_PRESENT_WIDTH 1
789#define MC_CMD_GET_PHY_CFG_SHORTBIST_LBN 1 789#define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN 1
790#define MC_CMD_GET_PHY_CFG_SHORTBIST_WIDTH 1 790#define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_WIDTH 1
791#define MC_CMD_GET_PHY_CFG_LONGBIST_LBN 2 791#define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN 2
792#define MC_CMD_GET_PHY_CFG_LONGBIST_WIDTH 1 792#define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_WIDTH 1
793#define MC_CMD_GET_PHY_CFG_LOWPOWER_LBN 3 793#define MC_CMD_GET_PHY_CFG_LOWPOWER_LBN 3
794#define MC_CMD_GET_PHY_CFG_LOWPOWER_WIDTH 1 794#define MC_CMD_GET_PHY_CFG_LOWPOWER_WIDTH 1
795#define MC_CMD_GET_PHY_CFG_POWEROFF_LBN 4 795#define MC_CMD_GET_PHY_CFG_POWEROFF_LBN 4
796#define MC_CMD_GET_PHY_CFG_POWEROFF_WIDTH 1 796#define MC_CMD_GET_PHY_CFG_POWEROFF_WIDTH 1
797#define MC_CMD_GET_PHY_CFG_TXDIS_LBN 5 797#define MC_CMD_GET_PHY_CFG_TXDIS_LBN 5
798#define MC_CMD_GET_PHY_CFG_TXDIS_WIDTH 1 798#define MC_CMD_GET_PHY_CFG_TXDIS_WIDTH 1
799#define MC_CMD_GET_PHY_CFG_BIST_LBN 6
800#define MC_CMD_GET_PHY_CFG_BIST_WIDTH 1
799#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4 801#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
800/* Bitmask of supported capabilities */ 802/* Bitmask of supported capabilities */
801#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8 803#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
@@ -832,7 +834,7 @@
832#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52 834#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
833#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20 835#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
834 836
835/* MC_CMD_START_PHY_BIST: 837/* MC_CMD_START_BIST:
836 * Start a BIST test on the PHY. 838 * Start a BIST test on the PHY.
837 * 839 *
838 * Locks required: PHY_LOCK if doing a PHY BIST 840 * Locks required: PHY_LOCK if doing a PHY BIST
@@ -840,34 +842,71 @@
840 */ 842 */
841#define MC_CMD_START_BIST 0x25 843#define MC_CMD_START_BIST 0x25
842#define MC_CMD_START_BIST_IN_LEN 4 844#define MC_CMD_START_BIST_IN_LEN 4
843#define MC_CMD_START_BIST_TYPE_OFST 0 845#define MC_CMD_START_BIST_IN_TYPE_OFST 0
846#define MC_CMD_START_BIST_OUT_LEN 0
844 847
845/* Run the PHY's short BIST */ 848/* Run the PHY's short cable BIST */
846#define MC_CMD_PHY_BIST_SHORT 1 849#define MC_CMD_PHY_BIST_CABLE_SHORT 1
847/* Run the PHY's long BIST */ 850/* Run the PHY's long cable BIST */
848#define MC_CMD_PHY_BIST_LONG 2 851#define MC_CMD_PHY_BIST_CABLE_LONG 2
849/* Run BIST on the currently selected BPX Serdes (XAUI or XFI) */ 852/* Run BIST on the currently selected BPX Serdes (XAUI or XFI) */
850#define MC_CMD_BPX_SERDES_BIST 3 853#define MC_CMD_BPX_SERDES_BIST 3
854/* Run the MC loopback tests */
855#define MC_CMD_MC_LOOPBACK_BIST 4
856/* Run the PHY's standard BIST */
857#define MC_CMD_PHY_BIST 5
851 858
852/* MC_CMD_POLL_PHY_BIST: (variadic output) 859/* MC_CMD_POLL_PHY_BIST: (variadic output)
853 * Poll for BIST completion 860 * Poll for BIST completion
854 * 861 *
855 * Returns a single status code, and a binary blob of phy-specific 862 * Returns a single status code, and optionally some PHY specific
856 * bist output. If the driver can't succesfully parse the BIST output, 863 * bist output. The driver should only consume the BIST output
857 * it should still respect the Pass/Fail in OUT.RESULT. 864 * after validating OUTLEN and PHY_CFG.PHY_TYPE.
858 * 865 *
859 * Locks required: PHY_LOCK if doing a PHY BIST 866 * If a driver can't succesfully parse the BIST output, it should
867 * still respect the pass/Fail in OUT.RESULT
868 *
869 * Locks required: PHY_LOCK if doing a PHY BIST
860 * Return code: 0, EACCES (if PHY_LOCK is not held) 870 * Return code: 0, EACCES (if PHY_LOCK is not held)
861 */ 871 */
862#define MC_CMD_POLL_BIST 0x26 872#define MC_CMD_POLL_BIST 0x26
863#define MC_CMD_POLL_BIST_IN_LEN 0 873#define MC_CMD_POLL_BIST_IN_LEN 0
864#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN 874#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN
875#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 40
876#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
865#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 877#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
866#define MC_CMD_POLL_BIST_RUNNING 1 878#define MC_CMD_POLL_BIST_RUNNING 1
867#define MC_CMD_POLL_BIST_PASSED 2 879#define MC_CMD_POLL_BIST_PASSED 2
868#define MC_CMD_POLL_BIST_FAILED 3 880#define MC_CMD_POLL_BIST_FAILED 3
869#define MC_CMD_POLL_BIST_TIMEOUT 4 881#define MC_CMD_POLL_BIST_TIMEOUT 4
882/* Generic: */
870#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4 883#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
884/* SFT9001-specific: */
885/* (offset 4 unused?) */
886#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 8
887#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 12
888#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 16
889#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 20
890#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 24
891#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 28
892#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 32
893#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 36
894#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 1
895#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 2
896#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 3
897#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 4
898#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 9
899/* mrsfp "PHY" driver: */
900#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
901#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0
902#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 1
903#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 2
904#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 3
905#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 4
906#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 5
907#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 6
908#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 7
909#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 8
871 910
872/* MC_CMD_PHY_SPI: (variadic in, variadic out) 911/* MC_CMD_PHY_SPI: (variadic in, variadic out)
873 * Read/Write/Erase the PHY SPI device 912 * Read/Write/Erase the PHY SPI device
@@ -1206,6 +1245,13 @@
1206#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST \ 1245#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST \
1207 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 178) 1246 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 178)
1208 1247
1248#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST \
1249 MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
1250#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
1251#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
1252#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
1253#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
1254
1209#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4 1255#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
1210#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0 1256#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
1211 1257
@@ -1216,7 +1262,8 @@
1216#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 1262#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
1217#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 1263#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
1218#define MC_CMD_WOL_TYPE_BITMAP 0x5 1264#define MC_CMD_WOL_TYPE_BITMAP 0x5
1219#define MC_CMD_WOL_TYPE_MAX 0x6 1265#define MC_CMD_WOL_TYPE_LINK 0x6
1266#define MC_CMD_WOL_TYPE_MAX 0x7
1220 1267
1221#define MC_CMD_FILTER_MODE_SIMPLE 0x0 1268#define MC_CMD_FILTER_MODE_SIMPLE 0x0
1222#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff 1269#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff
@@ -1357,14 +1404,24 @@
1357 * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held) 1404 * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
1358 */ 1405 */
1359#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c 1406#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
1360#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 4 1407#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
1361#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0 1408#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
1409#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
1362#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0 1410#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
1363 1411
1364/* MC_CMD_REBOOT: 1412/* MC_CMD_REBOOT:
1365 * Reboot the MC. The AFTER_ASSERTION flag is intended to be used 1413 * Reboot the MC.
1366 * when the driver notices an assertion failure, to allow two ports to 1414 *
1367 * both recover (semi-)gracefully. 1415 * The AFTER_ASSERTION flag is intended to be used when the driver notices
1416 * an assertion failure (at which point it is expected to perform a complete
1417 * tear down and reinitialise), to allow both ports to reset the MC once
1418 * in an atomic fashion.
1419 *
1420 * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1,
1421 * which means that they will automatically reboot out of the assertion
1422 * handler, so this is in practise an optional operation. It is still
1423 * recommended that drivers execute this to support custom firmwares
1424 * with REBOOT_ON_ASSERT=0.
1368 * 1425 *
1369 * Locks required: NONE 1426 * Locks required: NONE
1370 * Returns: Nothing. You get back a response with ERR=1, DATALEN=0 1427 * Returns: Nothing. You get back a response with ERR=1, DATALEN=0
@@ -1469,11 +1526,10 @@
1469 ((_ofst) + 6) 1526 ((_ofst) + 6)
1470 1527
1471/* MC_CMD_READ_SENSORS 1528/* MC_CMD_READ_SENSORS
1472 * Returns the current (value, state) for each sensor 1529 * Returns the current reading from each sensor
1473 * 1530 *
1474 * Returns the current (value, state) [each 16bit] of each sensor supported by 1531 * Returns a sparse array of sensor readings (indexed by the sensor
1475 * this board, by DMA'ing a sparse array (indexed by the sensor type) into host 1532 * type) into host memory. Each array element is a dword.
1476 * memory.
1477 * 1533 *
1478 * The MC will send a SENSOREVT event every time any sensor changes state. The 1534 * The MC will send a SENSOREVT event every time any sensor changes state. The
1479 * driver is responsible for ensuring that it doesn't miss any events. The board 1535 * driver is responsible for ensuring that it doesn't miss any events. The board
@@ -1486,6 +1542,12 @@
1486#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4 1542#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
1487#define MC_CMD_READ_SENSORS_OUT_LEN 0 1543#define MC_CMD_READ_SENSORS_OUT_LEN 0
1488 1544
1545/* Sensor reading fields */
1546#define MC_CMD_READ_SENSOR_VALUE_LBN 0
1547#define MC_CMD_READ_SENSOR_VALUE_WIDTH 16
1548#define MC_CMD_READ_SENSOR_STATE_LBN 16
1549#define MC_CMD_READ_SENSOR_STATE_WIDTH 8
1550
1489 1551
1490/* MC_CMD_GET_PHY_STATE: 1552/* MC_CMD_GET_PHY_STATE:
1491 * Report current state of PHY. A "zombie" PHY is a PHY that has failed to 1553 * Report current state of PHY. A "zombie" PHY is a PHY that has failed to
@@ -1577,4 +1639,98 @@
1577#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0 1639#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
1578#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0 1640#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
1579 1641
1642
1643/* MC_CMD_TEST_ASSERT:
1644 * Deliberately trigger an assert-detonation in the firmware for testing
1645 * purposes (i.e. to allow tests that the driver copes gracefully).
1646 *
1647 * Locks required: None
1648 * Returns: 0
1649 */
1650
1651#define MC_CMD_TESTASSERT 0x49
1652#define MC_CMD_TESTASSERT_IN_LEN 0
1653#define MC_CMD_TESTASSERT_OUT_LEN 0
1654
1655/* MC_CMD_WORKAROUND 0x4a
1656 *
1657 * Enable/Disable a given workaround. The mcfw will return EINVAL if it
1658 * doesn't understand the given workaround number - which should not
1659 * be treated as a hard error by client code.
1660 *
1661 * This op does not imply any semantics about each workaround, that's between
1662 * the driver and the mcfw on a per-workaround basis.
1663 *
1664 * Locks required: None
1665 * Returns: 0, EINVAL
1666 */
1667#define MC_CMD_WORKAROUND 0x4a
1668#define MC_CMD_WORKAROUND_IN_LEN 8
1669#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
1670#define MC_CMD_WORKAROUND_BUG17230 1
1671#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
1672#define MC_CMD_WORKAROUND_OUT_LEN 0
1673
1674/* MC_CMD_GET_PHY_MEDIA_INFO:
1675 * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
1676 * SFP+ PHYs).
1677 *
1678 * The "media type" can be found via GET_PHY_CFG (GET_PHY_CFG_OUT_MEDIA_TYPE);
1679 * the valid "page number" input values, and the output data, are interpreted
1680 * on a per-type basis.
1681 *
1682 * For SFP+: PAGE=0 or 1 returns a 128-byte block read from module I2C address
1683 * 0xA0 offset 0 or 0x80.
1684 * Anything else: currently undefined.
1685 *
1686 * Locks required: None
1687 * Return code: 0
1688 */
1689#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
1690#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
1691#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
1692#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(_num_bytes) (4 + (_num_bytes))
1693#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
1694#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
1695
1696/* MC_CMD_NVRAM_TEST:
1697 * Test a particular NVRAM partition for valid contents (where "valid"
1698 * depends on the type of partition).
1699 *
1700 * Locks required: None
1701 * Return code: 0
1702 */
1703#define MC_CMD_NVRAM_TEST 0x4c
1704#define MC_CMD_NVRAM_TEST_IN_LEN 4
1705#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
1706#define MC_CMD_NVRAM_TEST_OUT_LEN 4
1707#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
1708#define MC_CMD_NVRAM_TEST_PASS 0
1709#define MC_CMD_NVRAM_TEST_FAIL 1
1710#define MC_CMD_NVRAM_TEST_NOTSUPP 2
1711
1712/* MC_CMD_MRSFP_TWEAK: (debug)
1713 * Read status and/or set parameters for the "mrsfp" driver in mr_rusty builds.
1714 * I2C I/O expander bits are always read; if equaliser parameters are supplied,
1715 * they are configured first.
1716 *
1717 * Locks required: None
1718 * Return code: 0, EINVAL
1719 */
1720#define MC_CMD_MRSFP_TWEAK 0x4d
1721#define MC_CMD_MRSFP_TWEAK_IN_LEN_READ_ONLY 0
1722#define MC_CMD_MRSFP_TWEAK_IN_LEN_EQ_CONFIG 16
1723#define MC_CMD_MRSFP_TWEAK_IN_TXEQ_LEVEL_OFST 0 /* 0-6 low->high de-emph. */
1724#define MC_CMD_MRSFP_TWEAK_IN_TXEQ_DT_CFG_OFST 4 /* 0-8 low->high ref.V */
1725#define MC_CMD_MRSFP_TWEAK_IN_RXEQ_BOOST_OFST 8 /* 0-8 low->high boost */
1726#define MC_CMD_MRSFP_TWEAK_IN_RXEQ_DT_CFG_OFST 12 /* 0-8 low->high ref.V */
1727#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
1728#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0 /* input bits */
1729#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 /* output bits */
1730#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 /* dirs: 0=out, 1=in */
1731
1732/* Do NOT add new commands beyond 0x4f as part of 3.0 : 0x50 - 0x7f will be
1733 * used for post-3.0 extensions. If you run out of space, look for gaps or
1734 * commands that are unused in the existing range. */
1735
1580#endif /* MCDI_PCOL_H */ 1736#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index eb694af7a473..34c22fa986e2 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -381,6 +381,18 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
381 * but by convention we don't */ 381 * but by convention we don't */
382 efx->loopback_modes &= ~(1 << LOOPBACK_NONE); 382 efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
383 383
384 /* Set the initial link mode */
385 efx_mcdi_phy_decode_link(
386 efx, &efx->link_state,
387 MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
388 MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
389 MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
390
391 /* Default to Autonegotiated flow control if the PHY supports it */
392 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
393 if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
394 efx->wanted_fc |= EFX_FC_AUTO;
395
384 return 0; 396 return 0;
385 397
386fail: 398fail:
@@ -436,7 +448,7 @@ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
436 448
437 /* The link partner capabilities are only relevent if the 449 /* The link partner capabilities are only relevent if the
438 * link supports flow control autonegotiation */ 450 * link supports flow control autonegotiation */
439 if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) 451 if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
440 return; 452 return;
441 453
442 /* If flow control autoneg is supported and enabled, then fine */ 454 /* If flow control autoneg is supported and enabled, then fine */
@@ -560,6 +572,27 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
560 return 0; 572 return 0;
561} 573}
562 574
575static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
576{
577 u8 outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN];
578 size_t outlen;
579 int rc;
580
581 BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0);
582
583 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0,
584 outbuf, sizeof(outbuf), &outlen);
585 if (rc)
586 return rc;
587
588 if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
589 return -EMSGSIZE;
590 if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK)
591 return -EINVAL;
592
593 return 0;
594}
595
563struct efx_phy_operations efx_mcdi_phy_ops = { 596struct efx_phy_operations efx_mcdi_phy_ops = {
564 .probe = efx_mcdi_phy_probe, 597 .probe = efx_mcdi_phy_probe,
565 .init = efx_port_dummy_op_int, 598 .init = efx_port_dummy_op_int,
@@ -569,6 +602,7 @@ struct efx_phy_operations efx_mcdi_phy_ops = {
569 .remove = efx_mcdi_phy_remove, 602 .remove = efx_mcdi_phy_remove,
570 .get_settings = efx_mcdi_phy_get_settings, 603 .get_settings = efx_mcdi_phy_get_settings,
571 .set_settings = efx_mcdi_phy_set_settings, 604 .set_settings = efx_mcdi_phy_set_settings,
605 .test_alive = efx_mcdi_phy_test_alive,
572 .run_tests = NULL, 606 .run_tests = NULL,
573 .test_name = NULL, 607 .test_name = NULL,
574}; 608};
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 1574e52f0594..0548fcbbdcd0 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -335,3 +335,27 @@ enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx)
335 mii_advertise_flowctrl(efx->wanted_fc), 335 mii_advertise_flowctrl(efx->wanted_fc),
336 efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA)); 336 efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
337} 337}
338
339int efx_mdio_test_alive(struct efx_nic *efx)
340{
341 int rc;
342 int devad = __ffs(efx->mdio.mmds);
343 u16 physid1, physid2;
344
345 mutex_lock(&efx->mac_lock);
346
347 physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1);
348 physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2);
349
350 if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
351 (physid2 == 0x0000) || (physid2 == 0xffff)) {
352 EFX_ERR(efx, "no MDIO PHY present with ID %d\n",
353 efx->mdio.prtad);
354 rc = -EINVAL;
355 } else {
356 rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
357 }
358
359 mutex_unlock(&efx->mac_lock);
360 return rc;
361}
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index f6ac9503339d..f89e71929603 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -106,4 +106,7 @@ efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
106 mdio_set_flag(&efx->mdio, efx->mdio.prtad, devad, addr, mask, state); 106 mdio_set_flag(&efx->mdio, efx->mdio.prtad, devad, addr, mask, state);
107} 107}
108 108
109/* Liveness self-test for MDIO PHYs */
110extern int efx_mdio_test_alive(struct efx_nic *efx);
111
109#endif /* EFX_MDIO_10G_H */ 112#endif /* EFX_MDIO_10G_H */
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index d5aab5b3fa06..cb018e272097 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -18,7 +18,6 @@
18#include <linux/etherdevice.h> 18#include <linux/etherdevice.h>
19#include <linux/ethtool.h> 19#include <linux/ethtool.h>
20#include <linux/if_vlan.h> 20#include <linux/if_vlan.h>
21#include <linux/timer.h>
22#include <linux/mdio.h> 21#include <linux/mdio.h>
23#include <linux/list.h> 22#include <linux/list.h>
24#include <linux/pci.h> 23#include <linux/pci.h>
@@ -101,9 +100,6 @@ do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
101 * Special buffers are used for the event queues and the TX and RX 100 * Special buffers are used for the event queues and the TX and RX
102 * descriptor queues for each channel. They are *not* used for the 101 * descriptor queues for each channel. They are *not* used for the
103 * actual transmit and receive buffers. 102 * actual transmit and receive buffers.
104 *
105 * Note that for Falcon, TX and RX descriptor queues live in host memory.
106 * Allocation and freeing procedures must take this into account.
107 */ 103 */
108struct efx_special_buffer { 104struct efx_special_buffer {
109 void *addr; 105 void *addr;
@@ -300,7 +296,7 @@ struct efx_rx_queue {
300 * @dma_addr: DMA base address of the buffer 296 * @dma_addr: DMA base address of the buffer
301 * @len: Buffer length, in bytes 297 * @len: Buffer length, in bytes
302 * 298 *
303 * Falcon uses these buffers for its interrupt status registers and 299 * The NIC uses these buffers for its interrupt status registers and
304 * MAC stats dumps. 300 * MAC stats dumps.
305 */ 301 */
306struct efx_buffer { 302struct efx_buffer {
@@ -516,8 +512,9 @@ struct efx_mac_operations {
516 * @set_settings: Set ethtool settings. Serialised by the mac_lock. 512 * @set_settings: Set ethtool settings. Serialised by the mac_lock.
517 * @set_npage_adv: Set abilities advertised in (Extended) Next Page 513 * @set_npage_adv: Set abilities advertised in (Extended) Next Page
518 * (only needed where AN bit is set in mmds) 514 * (only needed where AN bit is set in mmds)
515 * @test_alive: Test that PHY is 'alive' (online)
519 * @test_name: Get the name of a PHY-specific test/result 516 * @test_name: Get the name of a PHY-specific test/result
520 * @run_tests: Run tests and record results as appropriate. 517 * @run_tests: Run tests and record results as appropriate (offline).
521 * Flags are the ethtool tests flags. 518 * Flags are the ethtool tests flags.
522 */ 519 */
523struct efx_phy_operations { 520struct efx_phy_operations {
@@ -532,6 +529,7 @@ struct efx_phy_operations {
532 int (*set_settings) (struct efx_nic *efx, 529 int (*set_settings) (struct efx_nic *efx,
533 struct ethtool_cmd *ecmd); 530 struct ethtool_cmd *ecmd);
534 void (*set_npage_adv) (struct efx_nic *efx, u32); 531 void (*set_npage_adv) (struct efx_nic *efx, u32);
532 int (*test_alive) (struct efx_nic *efx);
535 const char *(*test_name) (struct efx_nic *efx, unsigned int index); 533 const char *(*test_name) (struct efx_nic *efx, unsigned int index);
536 int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags); 534 int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
537}; 535};
@@ -672,7 +670,7 @@ union efx_multicast_hash {
672 * @irq_status: Interrupt status buffer 670 * @irq_status: Interrupt status buffer
673 * @last_irq_cpu: Last CPU to handle interrupt. 671 * @last_irq_cpu: Last CPU to handle interrupt.
674 * This register is written with the SMP processor ID whenever an 672 * This register is written with the SMP processor ID whenever an
675 * interrupt is handled. It is used by falcon_test_interrupt() 673 * interrupt is handled. It is used by efx_nic_test_interrupt()
676 * to verify that an interrupt has occurred. 674 * to verify that an interrupt has occurred.
677 * @spi_flash: SPI flash device 675 * @spi_flash: SPI flash device
678 * This field will be %NULL if no flash device is present (or for Siena). 676 * This field will be %NULL if no flash device is present (or for Siena).
@@ -721,8 +719,7 @@ union efx_multicast_hash {
721 * @loopback_modes: Supported loopback mode bitmask 719 * @loopback_modes: Supported loopback mode bitmask
722 * @loopback_selftest: Offline self-test private state 720 * @loopback_selftest: Offline self-test private state
723 * 721 *
724 * The @priv field of the corresponding &struct net_device points to 722 * This is stored in the private area of the &struct net_device.
725 * this.
726 */ 723 */
727struct efx_nic { 724struct efx_nic {
728 char name[IFNAMSIZ]; 725 char name[IFNAMSIZ];
@@ -995,7 +992,7 @@ static inline void clear_bit_le(unsigned nr, unsigned char *addr)
995 * that the net driver will program into the MAC as the maximum frame 992 * that the net driver will program into the MAC as the maximum frame
996 * length. 993 * length.
997 * 994 *
998 * The 10G MAC used in Falcon requires 8-byte alignment on the frame 995 * The 10G MAC requires 8-byte alignment on the frame
999 * length, so we round up to the nearest 8. 996 * length, so we round up to the nearest 8.
1000 * 997 *
1001 * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an 998 * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index db44224ed2ca..b06f8e348307 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -623,10 +623,6 @@ void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
623 * 623 *
624 * This writes the EVQ_RPTR_REG register for the specified channel's 624 * This writes the EVQ_RPTR_REG register for the specified channel's
625 * event queue. 625 * event queue.
626 *
627 * Note that EVQ_RPTR_REG contains the index of the "last read" event,
628 * whereas channel->eventq_read_ptr contains the index of the "next to
629 * read" event.
630 */ 626 */
631void efx_nic_eventq_read_ack(struct efx_channel *channel) 627void efx_nic_eventq_read_ack(struct efx_channel *channel)
632{ 628{
@@ -1384,6 +1380,15 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1384 efx->last_irq_cpu = raw_smp_processor_id(); 1380 efx->last_irq_cpu = raw_smp_processor_id();
1385 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", 1381 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1386 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); 1382 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1383 } else if (EFX_WORKAROUND_15783(efx)) {
1384 /* We can't return IRQ_HANDLED more than once on seeing ISR0=0
1385 * because this might be a shared interrupt, but we do need to
1386 * check the channel every time and preemptively rearm it if
1387 * it's idle. */
1388 efx_for_each_channel(channel, efx) {
1389 if (!channel->work_pending)
1390 efx_nic_eventq_read_ack(channel);
1391 }
1387 } 1392 }
1388 1393
1389 return result; 1394 return result;
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index 67eec7a6e487..1bee62c83001 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -445,4 +445,5 @@ struct efx_phy_operations falcon_qt202x_phy_ops = {
445 .remove = qt202x_phy_remove, 445 .remove = qt202x_phy_remove,
446 .get_settings = qt202x_phy_get_settings, 446 .get_settings = qt202x_phy_get_settings,
447 .set_settings = efx_mdio_set_settings, 447 .set_settings = efx_mdio_set_settings,
448 .test_alive = efx_mdio_test_alive,
448}; 449};
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 250c8827b842..cf0139a7d9a4 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -24,9 +24,6 @@
24#include "nic.h" 24#include "nic.h"
25#include "selftest.h" 25#include "selftest.h"
26#include "workarounds.h" 26#include "workarounds.h"
27#include "spi.h"
28#include "io.h"
29#include "mdio_10g.h"
30 27
31/* 28/*
32 * Loopback test packet structure 29 * Loopback test packet structure
@@ -76,42 +73,15 @@ struct efx_loopback_state {
76 * 73 *
77 **************************************************************************/ 74 **************************************************************************/
78 75
79static int efx_test_mdio(struct efx_nic *efx, struct efx_self_tests *tests) 76static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
80{ 77{
81 int rc = 0; 78 int rc = 0;
82 int devad;
83 u16 physid1, physid2;
84
85 if (efx->mdio.mode_support & MDIO_SUPPORTS_C45)
86 devad = __ffs(efx->mdio.mmds);
87 else if (efx->mdio.mode_support & MDIO_SUPPORTS_C22)
88 devad = MDIO_DEVAD_NONE;
89 else
90 return 0;
91
92 mutex_lock(&efx->mac_lock);
93 tests->mdio = -1;
94
95 physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1);
96 physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2);
97 79
98 if ((physid1 == 0x0000) || (physid1 == 0xffff) || 80 if (efx->phy_op->test_alive) {
99 (physid2 == 0x0000) || (physid2 == 0xffff)) { 81 rc = efx->phy_op->test_alive(efx);
100 EFX_ERR(efx, "no MDIO PHY present with ID %d\n", 82 tests->phy_alive = rc ? -1 : 1;
101 efx->mdio.prtad);
102 rc = -EINVAL;
103 goto out;
104 } 83 }
105 84
106 if (EFX_IS10G(efx)) {
107 rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
108 if (rc)
109 goto out;
110 }
111
112out:
113 mutex_unlock(&efx->mac_lock);
114 tests->mdio = rc ? -1 : 1;
115 return rc; 85 return rc;
116} 86}
117 87
@@ -258,7 +228,7 @@ static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
258 return 0; 228 return 0;
259 229
260 mutex_lock(&efx->mac_lock); 230 mutex_lock(&efx->mac_lock);
261 rc = efx->phy_op->run_tests(efx, tests->phy, flags); 231 rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
262 mutex_unlock(&efx->mac_lock); 232 mutex_unlock(&efx->mac_lock);
263 return rc; 233 return rc;
264} 234}
@@ -684,7 +654,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
684 /* Online (i.e. non-disruptive) testing 654 /* Online (i.e. non-disruptive) testing
685 * This checks interrupt generation, event delivery and PHY presence. */ 655 * This checks interrupt generation, event delivery and PHY presence. */
686 656
687 rc = efx_test_mdio(efx, tests); 657 rc = efx_test_phy_alive(efx, tests);
688 if (rc && !rc_test) 658 if (rc && !rc_test)
689 rc_test = rc; 659 rc_test = rc;
690 660
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index f6feee04c96b..643bef72b99d 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -32,7 +32,7 @@ struct efx_loopback_self_tests {
32 */ 32 */
33struct efx_self_tests { 33struct efx_self_tests {
34 /* online tests */ 34 /* online tests */
35 int mdio; 35 int phy_alive;
36 int nvram; 36 int nvram;
37 int interrupt; 37 int interrupt;
38 int eventq_dma[EFX_MAX_CHANNELS]; 38 int eventq_dma[EFX_MAX_CHANNELS];
@@ -40,7 +40,7 @@ struct efx_self_tests {
40 int eventq_poll[EFX_MAX_CHANNELS]; 40 int eventq_poll[EFX_MAX_CHANNELS];
41 /* offline tests */ 41 /* offline tests */
42 int registers; 42 int registers;
43 int phy[EFX_MAX_PHY_TESTS]; 43 int phy_ext[EFX_MAX_PHY_TESTS];
44 struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1]; 44 struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
45}; 45};
46 46
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index f8c6771e66d8..1619fb5a64f5 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -106,16 +106,11 @@ static int siena_probe_port(struct efx_nic *efx)
106 efx->mdio.mdio_read = siena_mdio_read; 106 efx->mdio.mdio_read = siena_mdio_read;
107 efx->mdio.mdio_write = siena_mdio_write; 107 efx->mdio.mdio_write = siena_mdio_write;
108 108
109 /* Fill out MDIO structure and loopback modes */ 109 /* Fill out MDIO structure, loopback modes, and initial link state */
110 rc = efx->phy_op->probe(efx); 110 rc = efx->phy_op->probe(efx);
111 if (rc != 0) 111 if (rc != 0)
112 return rc; 112 return rc;
113 113
114 /* Initial assumption */
115 efx->link_state.speed = 10000;
116 efx->link_state.fd = true;
117 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
118
119 /* Allocate buffer for stats */ 114 /* Allocate buffer for stats */
120 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, 115 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
121 MC_CMD_MAC_NSTATS * sizeof(u64)); 116 MC_CMD_MAC_NSTATS * sizeof(u64));
@@ -139,7 +134,7 @@ void siena_remove_port(struct efx_nic *efx)
139 134
140static const struct efx_nic_register_test siena_register_tests[] = { 135static const struct efx_nic_register_test siena_register_tests[] = {
141 { FR_AZ_ADR_REGION, 136 { FR_AZ_ADR_REGION,
142 EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) }, 137 EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
143 { FR_CZ_USR_EV_CFG, 138 { FR_CZ_USR_EV_CFG,
144 EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) }, 139 EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) },
145 { FR_AZ_RX_CFG, 140 { FR_AZ_RX_CFG,
@@ -181,6 +176,12 @@ static int siena_test_registers(struct efx_nic *efx)
181 176
182static int siena_reset_hw(struct efx_nic *efx, enum reset_type method) 177static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
183{ 178{
179 int rc;
180
181 /* Recover from a failed assertion pre-reset */
182 rc = efx_mcdi_handle_assertion(efx);
183 if (rc)
184 return rc;
184 185
185 if (method == RESET_TYPE_WORLD) 186 if (method == RESET_TYPE_WORLD)
186 return efx_mcdi_reset_mc(efx); 187 return efx_mcdi_reset_mc(efx);
@@ -582,6 +583,7 @@ struct efx_nic_type siena_a0_nic_type = {
582 .set_wol = siena_set_wol, 583 .set_wol = siena_set_wol,
583 .resume_wol = siena_init_wol, 584 .resume_wol = siena_init_wol,
584 .test_registers = siena_test_registers, 585 .test_registers = siena_test_registers,
586 .test_nvram = efx_mcdi_nvram_test_all,
585 .default_mac_ops = &efx_mcdi_mac_operations, 587 .default_mac_ops = &efx_mcdi_mac_operations,
586 588
587 .revision = EFX_REV_SIENA_A0, 589 .revision = EFX_REV_SIENA_A0,
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 3009c297c135..10db071bd837 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -842,6 +842,7 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = {
842 .get_settings = tenxpress_get_settings, 842 .get_settings = tenxpress_get_settings,
843 .set_settings = tenxpress_set_settings, 843 .set_settings = tenxpress_set_settings,
844 .set_npage_adv = sfx7101_set_npage_adv, 844 .set_npage_adv = sfx7101_set_npage_adv,
845 .test_alive = efx_mdio_test_alive,
845 .test_name = sfx7101_test_name, 846 .test_name = sfx7101_test_name,
846 .run_tests = sfx7101_run_tests, 847 .run_tests = sfx7101_run_tests,
847}; 848};
@@ -856,6 +857,7 @@ struct efx_phy_operations falcon_sft9001_phy_ops = {
856 .get_settings = tenxpress_get_settings, 857 .get_settings = tenxpress_get_settings,
857 .set_settings = tenxpress_set_settings, 858 .set_settings = tenxpress_set_settings,
858 .set_npage_adv = sft9001_set_npage_adv, 859 .set_npage_adv = sft9001_set_npage_adv,
860 .test_alive = efx_mdio_test_alive,
859 .test_name = sft9001_test_name, 861 .test_name = sft9001_test_name,
860 .run_tests = sft9001_run_tests, 862 .run_tests = sft9001_run_tests,
861}; 863};
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 6b364a6c6c60..ed999d31f1fa 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -660,7 +660,7 @@ static void sgiseeq_set_multicast(struct net_device *dev)
660 660
661 if(dev->flags & IFF_PROMISC) 661 if(dev->flags & IFF_PROMISC)
662 sp->mode = SEEQ_RCMD_RANY; 662 sp->mode = SEEQ_RCMD_RANY;
663 else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count) 663 else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
664 sp->mode = SEEQ_RCMD_RBMCAST; 664 sp->mode = SEEQ_RCMD_RBMCAST;
665 else 665 else
666 sp->mode = SEEQ_RCMD_RBCAST; 666 sp->mode = SEEQ_RCMD_RBCAST;
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 7402b858cab7..42a35f086a9f 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -1473,13 +1473,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1473 if (ret) 1473 if (ret)
1474 goto out_unregister; 1474 goto out_unregister;
1475 1475
1476 /* pritnt device infomation */ 1476 /* print device infomation */
1477 pr_info("Base address at 0x%x, ", 1477 pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
1478 (u32)ndev->base_addr); 1478 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
1479
1480 for (i = 0; i < 5; i++)
1481 printk("%02X:", ndev->dev_addr[i]);
1482 printk("%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);
1483 1479
1484 platform_set_drvdata(pdev, ndev); 1480 platform_set_drvdata(pdev, ndev);
1485 1481
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 31233b4c44a0..760d9e83a465 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -17,7 +17,9 @@
17 17
18 See the file COPYING in this distribution for more information. 18 See the file COPYING in this distribution for more information.
19 19
20 */ 20*/
21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 23
22#include <linux/module.h> 24#include <linux/module.h>
23#include <linux/moduleparam.h> 25#include <linux/moduleparam.h>
@@ -32,25 +34,13 @@
32#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
33#include <asm/irq.h> 35#include <asm/irq.h>
34 36
35#define net_drv(p, arg...) if (netif_msg_drv(p)) \
36 printk(arg)
37#define net_probe(p, arg...) if (netif_msg_probe(p)) \
38 printk(arg)
39#define net_link(p, arg...) if (netif_msg_link(p)) \
40 printk(arg)
41#define net_intr(p, arg...) if (netif_msg_intr(p)) \
42 printk(arg)
43#define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
44 printk(arg)
45
46#define PHY_MAX_ADDR 32 37#define PHY_MAX_ADDR 32
47#define PHY_ID_ANY 0x1f 38#define PHY_ID_ANY 0x1f
48#define MII_REG_ANY 0x1f 39#define MII_REG_ANY 0x1f
49 40
50#define DRV_VERSION "1.3" 41#define DRV_VERSION "1.4"
51#define DRV_NAME "sis190" 42#define DRV_NAME "sis190"
52#define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION 43#define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
53#define PFX DRV_NAME ": "
54 44
55#define sis190_rx_skb netif_rx 45#define sis190_rx_skb netif_rx
56#define sis190_rx_quota(count, quota) count 46#define sis190_rx_quota(count, quota) count
@@ -294,6 +284,12 @@ struct sis190_private {
294 struct mii_if_info mii_if; 284 struct mii_if_info mii_if;
295 struct list_head first_phy; 285 struct list_head first_phy;
296 u32 features; 286 u32 features;
287 u32 negotiated_lpa;
288 enum {
289 LNK_OFF,
290 LNK_ON,
291 LNK_AUTONEG,
292 } link_status;
297}; 293};
298 294
299struct sis190_phy { 295struct sis190_phy {
@@ -334,7 +330,7 @@ static const struct {
334 { "SiS 191 PCI Gigabit Ethernet adapter" }, 330 { "SiS 191 PCI Gigabit Ethernet adapter" },
335}; 331};
336 332
337static struct pci_device_id sis190_pci_tbl[] = { 333static DEFINE_PCI_DEVICE_TABLE(sis190_pci_tbl) = {
338 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 }, 334 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
339 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 }, 335 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
340 { 0, }, 336 { 0, },
@@ -381,7 +377,7 @@ static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
381 } 377 }
382 378
383 if (i > 99) 379 if (i > 99)
384 printk(KERN_ERR PFX "PHY command failed !\n"); 380 pr_err("PHY command failed !\n");
385} 381}
386 382
387static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val) 383static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
@@ -493,18 +489,24 @@ static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
493{ 489{
494 u32 rx_buf_sz = tp->rx_buf_sz; 490 u32 rx_buf_sz = tp->rx_buf_sz;
495 struct sk_buff *skb; 491 struct sk_buff *skb;
492 dma_addr_t mapping;
496 493
497 skb = netdev_alloc_skb(tp->dev, rx_buf_sz); 494 skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
498 if (likely(skb)) { 495 if (unlikely(!skb))
499 dma_addr_t mapping; 496 goto skb_alloc_failed;
500 497 mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
501 mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz, 498 PCI_DMA_FROMDEVICE);
502 PCI_DMA_FROMDEVICE); 499 if (pci_dma_mapping_error(tp->pci_dev, mapping))
503 sis190_map_to_asic(desc, mapping, rx_buf_sz); 500 goto out;
504 } else 501 sis190_map_to_asic(desc, mapping, rx_buf_sz);
505 sis190_make_unusable_by_asic(desc);
506 502
507 return skb; 503 return skb;
504
505out:
506 dev_kfree_skb_any(skb);
507skb_alloc_failed:
508 sis190_make_unusable_by_asic(desc);
509 return NULL;
508} 510}
509 511
510static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev, 512static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
@@ -589,8 +591,7 @@ static int sis190_rx_interrupt(struct net_device *dev,
589 591
590 status = le32_to_cpu(desc->PSize); 592 status = le32_to_cpu(desc->PSize);
591 593
592 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name, 594 //netif_info(tp, intr, dev, "Rx PSize = %08x\n", status);
593 // status);
594 595
595 if (sis190_rx_pkt_err(status, stats) < 0) 596 if (sis190_rx_pkt_err(status, stats) < 0)
596 sis190_give_to_asic(desc, tp->rx_buf_sz); 597 sis190_give_to_asic(desc, tp->rx_buf_sz);
@@ -601,9 +602,8 @@ static int sis190_rx_interrupt(struct net_device *dev,
601 struct pci_dev *pdev = tp->pci_dev; 602 struct pci_dev *pdev = tp->pci_dev;
602 603
603 if (unlikely(pkt_size > tp->rx_buf_sz)) { 604 if (unlikely(pkt_size > tp->rx_buf_sz)) {
604 net_intr(tp, KERN_INFO 605 netif_info(tp, intr, dev,
605 "%s: (frag) status = %08x.\n", 606 "(frag) status = %08x\n", status);
606 dev->name, status);
607 stats->rx_dropped++; 607 stats->rx_dropped++;
608 stats->rx_length_errors++; 608 stats->rx_length_errors++;
609 sis190_give_to_asic(desc, tp->rx_buf_sz); 609 sis190_give_to_asic(desc, tp->rx_buf_sz);
@@ -637,12 +637,12 @@ static int sis190_rx_interrupt(struct net_device *dev,
637 tp->cur_rx = cur_rx; 637 tp->cur_rx = cur_rx;
638 638
639 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx); 639 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
640 if (!delta && count && netif_msg_intr(tp)) 640 if (!delta && count)
641 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name); 641 netif_info(tp, intr, dev, "no Rx buffer allocated\n");
642 tp->dirty_rx += delta; 642 tp->dirty_rx += delta;
643 643
644 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp)) 644 if ((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx)
645 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name); 645 netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");
646 646
647 return count; 647 return count;
648} 648}
@@ -751,10 +751,11 @@ static irqreturn_t sis190_interrupt(int irq, void *__dev)
751 751
752 SIS_W32(IntrStatus, status); 752 SIS_W32(IntrStatus, status);
753 753
754 // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status); 754// netif_info(tp, intr, dev, "status = %08x\n", status);
755 755
756 if (status & LinkChange) { 756 if (status & LinkChange) {
757 net_intr(tp, KERN_INFO "%s: link change.\n", dev->name); 757 netif_info(tp, intr, dev, "link change\n");
758 del_timer(&tp->timer);
758 schedule_work(&tp->phy_task); 759 schedule_work(&tp->phy_task);
759 } 760 }
760 761
@@ -841,19 +842,17 @@ static void sis190_set_rx_mode(struct net_device *dev)
841 AcceptBroadcast | AcceptMulticast | AcceptMyPhys | 842 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
842 AcceptAllPhys; 843 AcceptAllPhys;
843 mc_filter[1] = mc_filter[0] = 0xffffffff; 844 mc_filter[1] = mc_filter[0] = 0xffffffff;
844 } else if ((dev->mc_count > multicast_filter_limit) || 845 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
845 (dev->flags & IFF_ALLMULTI)) { 846 (dev->flags & IFF_ALLMULTI)) {
846 /* Too many to filter perfectly -- accept all multicasts. */ 847 /* Too many to filter perfectly -- accept all multicasts. */
847 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 848 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
848 mc_filter[1] = mc_filter[0] = 0xffffffff; 849 mc_filter[1] = mc_filter[0] = 0xffffffff;
849 } else { 850 } else {
850 struct dev_mc_list *mclist; 851 struct dev_mc_list *mclist;
851 unsigned int i;
852 852
853 rx_mode = AcceptBroadcast | AcceptMyPhys; 853 rx_mode = AcceptBroadcast | AcceptMyPhys;
854 mc_filter[1] = mc_filter[0] = 0; 854 mc_filter[1] = mc_filter[0] = 0;
855 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 855 netdev_for_each_mc_addr(mclist, dev) {
856 i++, mclist = mclist->next) {
857 int bit_nr = 856 int bit_nr =
858 ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f; 857 ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
859 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 858 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
@@ -929,13 +928,15 @@ static void sis190_phy_task(struct work_struct *work)
929 if (val & BMCR_RESET) { 928 if (val & BMCR_RESET) {
930 // FIXME: needlessly high ? -- FR 02/07/2005 929 // FIXME: needlessly high ? -- FR 02/07/2005
931 mod_timer(&tp->timer, jiffies + HZ/10); 930 mod_timer(&tp->timer, jiffies + HZ/10);
932 } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) & 931 goto out_unlock;
933 BMSR_ANEGCOMPLETE)) { 932 }
933
934 val = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
935 if (!(val & BMSR_ANEGCOMPLETE) && tp->link_status != LNK_AUTONEG) {
934 netif_carrier_off(dev); 936 netif_carrier_off(dev);
935 net_link(tp, KERN_WARNING "%s: auto-negotiating...\n", 937 netif_warn(tp, link, dev, "auto-negotiating...\n");
936 dev->name); 938 tp->link_status = LNK_AUTONEG;
937 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT); 939 } else if ((val & BMSR_LSTATUS) && tp->link_status != LNK_ON) {
938 } else {
939 /* Rejoice ! */ 940 /* Rejoice ! */
940 struct { 941 struct {
941 int val; 942 int val;
@@ -959,13 +960,13 @@ static void sis190_phy_task(struct work_struct *work)
959 u16 adv, autoexp, gigadv, gigrec; 960 u16 adv, autoexp, gigadv, gigrec;
960 961
961 val = mdio_read(ioaddr, phy_id, 0x1f); 962 val = mdio_read(ioaddr, phy_id, 0x1f);
962 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val); 963 netif_info(tp, link, dev, "mii ext = %04x\n", val);
963 964
964 val = mdio_read(ioaddr, phy_id, MII_LPA); 965 val = mdio_read(ioaddr, phy_id, MII_LPA);
965 adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE); 966 adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
966 autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION); 967 autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
967 net_link(tp, KERN_INFO "%s: mii lpa=%04x adv=%04x exp=%04x.\n", 968 netif_info(tp, link, dev, "mii lpa=%04x adv=%04x exp=%04x\n",
968 dev->name, val, adv, autoexp); 969 val, adv, autoexp);
969 970
970 if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) { 971 if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
971 /* check for gigabit speed */ 972 /* check for gigabit speed */
@@ -1004,10 +1005,14 @@ static void sis190_phy_task(struct work_struct *work)
1004 SIS_W32(RGDelay, 0x0440); 1005 SIS_W32(RGDelay, 0x0440);
1005 } 1006 }
1006 1007
1007 net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name, 1008 tp->negotiated_lpa = p->val;
1008 p->msg); 1009
1010 netif_info(tp, link, dev, "link on %s mode\n", p->msg);
1009 netif_carrier_on(dev); 1011 netif_carrier_on(dev);
1010 } 1012 tp->link_status = LNK_ON;
1013 } else if (!(val & BMSR_LSTATUS) && tp->link_status != LNK_AUTONEG)
1014 tp->link_status = LNK_OFF;
1015 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
1011 1016
1012out_unlock: 1017out_unlock:
1013 rtnl_unlock(); 1018 rtnl_unlock();
@@ -1191,13 +1196,17 @@ static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
1191 1196
1192 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) { 1197 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1193 netif_stop_queue(dev); 1198 netif_stop_queue(dev);
1194 net_tx_err(tp, KERN_ERR PFX 1199 netif_err(tp, tx_err, dev,
1195 "%s: BUG! Tx Ring full when queue awake!\n", 1200 "BUG! Tx Ring full when queue awake!\n");
1196 dev->name);
1197 return NETDEV_TX_BUSY; 1201 return NETDEV_TX_BUSY;
1198 } 1202 }
1199 1203
1200 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE); 1204 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1205 if (pci_dma_mapping_error(tp->pci_dev, mapping)) {
1206 netif_err(tp, tx_err, dev,
1207 "PCI mapping failed, dropping packet");
1208 return NETDEV_TX_BUSY;
1209 }
1201 1210
1202 tp->Tx_skbuff[entry] = skb; 1211 tp->Tx_skbuff[entry] = skb;
1203 1212
@@ -1211,6 +1220,12 @@ static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
1211 wmb(); 1220 wmb();
1212 1221
1213 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit); 1222 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1223 if (tp->negotiated_lpa & (LPA_1000HALF | LPA_100HALF | LPA_10HALF)) {
1224 /* Half Duplex */
1225 desc->status |= cpu_to_le32(COLEN | CRSEN | BKFEN);
1226 if (tp->negotiated_lpa & (LPA_1000HALF | LPA_1000FULL))
1227 desc->status |= cpu_to_le32(EXTEN | BSTEN); /* gigabit HD */
1228 }
1214 1229
1215 tp->cur_tx++; 1230 tp->cur_tx++;
1216 1231
@@ -1287,9 +1302,9 @@ static u16 sis190_default_phy(struct net_device *dev)
1287 1302
1288 if (mii_if->phy_id != phy_default->phy_id) { 1303 if (mii_if->phy_id != phy_default->phy_id) {
1289 mii_if->phy_id = phy_default->phy_id; 1304 mii_if->phy_id = phy_default->phy_id;
1290 net_probe(tp, KERN_INFO 1305 if (netif_msg_probe(tp))
1291 "%s: Using transceiver at address %d as default.\n", 1306 pr_info("%s: Using transceiver at address %d as default\n",
1292 pci_name(tp->pci_dev), mii_if->phy_id); 1307 pci_name(tp->pci_dev), mii_if->phy_id);
1293 } 1308 }
1294 1309
1295 status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR); 1310 status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
@@ -1327,14 +1342,15 @@ static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1327 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ? 1342 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1328 LAN : HOME) : p->type; 1343 LAN : HOME) : p->type;
1329 tp->features |= p->feature; 1344 tp->features |= p->feature;
1330 net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n", 1345 if (netif_msg_probe(tp))
1331 pci_name(tp->pci_dev), p->name, phy_id); 1346 pr_info("%s: %s transceiver at address %d\n",
1347 pci_name(tp->pci_dev), p->name, phy_id);
1332 } else { 1348 } else {
1333 phy->type = UNKNOWN; 1349 phy->type = UNKNOWN;
1334 net_probe(tp, KERN_INFO 1350 if (netif_msg_probe(tp))
1335 "%s: unknown PHY 0x%x:0x%x transceiver at address %d\n", 1351 pr_info("%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
1336 pci_name(tp->pci_dev), 1352 pci_name(tp->pci_dev),
1337 phy->id[0], (phy->id[1] & 0xfff0), phy_id); 1353 phy->id[0], (phy->id[1] & 0xfff0), phy_id);
1338 } 1354 }
1339} 1355}
1340 1356
@@ -1398,8 +1414,9 @@ static int __devinit sis190_mii_probe(struct net_device *dev)
1398 } 1414 }
1399 1415
1400 if (list_empty(&tp->first_phy)) { 1416 if (list_empty(&tp->first_phy)) {
1401 net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n", 1417 if (netif_msg_probe(tp))
1402 pci_name(tp->pci_dev)); 1418 pr_info("%s: No MII transceivers found!\n",
1419 pci_name(tp->pci_dev));
1403 rc = -EIO; 1420 rc = -EIO;
1404 goto out; 1421 goto out;
1405 } 1422 }
@@ -1445,7 +1462,8 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1445 1462
1446 dev = alloc_etherdev(sizeof(*tp)); 1463 dev = alloc_etherdev(sizeof(*tp));
1447 if (!dev) { 1464 if (!dev) {
1448 net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n"); 1465 if (netif_msg_drv(&debug))
1466 pr_err("unable to alloc new ethernet\n");
1449 rc = -ENOMEM; 1467 rc = -ENOMEM;
1450 goto err_out_0; 1468 goto err_out_0;
1451 } 1469 }
@@ -1458,34 +1476,39 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1458 1476
1459 rc = pci_enable_device(pdev); 1477 rc = pci_enable_device(pdev);
1460 if (rc < 0) { 1478 if (rc < 0) {
1461 net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev)); 1479 if (netif_msg_probe(tp))
1480 pr_err("%s: enable failure\n", pci_name(pdev));
1462 goto err_free_dev_1; 1481 goto err_free_dev_1;
1463 } 1482 }
1464 1483
1465 rc = -ENODEV; 1484 rc = -ENODEV;
1466 1485
1467 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 1486 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1468 net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n", 1487 if (netif_msg_probe(tp))
1469 pci_name(pdev)); 1488 pr_err("%s: region #0 is no MMIO resource\n",
1489 pci_name(pdev));
1470 goto err_pci_disable_2; 1490 goto err_pci_disable_2;
1471 } 1491 }
1472 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) { 1492 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1473 net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n", 1493 if (netif_msg_probe(tp))
1474 pci_name(pdev)); 1494 pr_err("%s: invalid PCI region size(s)\n",
1495 pci_name(pdev));
1475 goto err_pci_disable_2; 1496 goto err_pci_disable_2;
1476 } 1497 }
1477 1498
1478 rc = pci_request_regions(pdev, DRV_NAME); 1499 rc = pci_request_regions(pdev, DRV_NAME);
1479 if (rc < 0) { 1500 if (rc < 0) {
1480 net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n", 1501 if (netif_msg_probe(tp))
1481 pci_name(pdev)); 1502 pr_err("%s: could not request regions\n",
1503 pci_name(pdev));
1482 goto err_pci_disable_2; 1504 goto err_pci_disable_2;
1483 } 1505 }
1484 1506
1485 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1507 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1486 if (rc < 0) { 1508 if (rc < 0) {
1487 net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n", 1509 if (netif_msg_probe(tp))
1488 pci_name(pdev)); 1510 pr_err("%s: DMA configuration failed\n",
1511 pci_name(pdev));
1489 goto err_free_res_3; 1512 goto err_free_res_3;
1490 } 1513 }
1491 1514
@@ -1493,14 +1516,16 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1493 1516
1494 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE); 1517 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1495 if (!ioaddr) { 1518 if (!ioaddr) {
1496 net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n", 1519 if (netif_msg_probe(tp))
1497 pci_name(pdev)); 1520 pr_err("%s: cannot remap MMIO, aborting\n",
1521 pci_name(pdev));
1498 rc = -EIO; 1522 rc = -EIO;
1499 goto err_free_res_3; 1523 goto err_free_res_3;
1500 } 1524 }
1501 1525
1502 tp->pci_dev = pdev; 1526 tp->pci_dev = pdev;
1503 tp->mmio_addr = ioaddr; 1527 tp->mmio_addr = ioaddr;
1528 tp->link_status = LNK_OFF;
1504 1529
1505 sis190_irq_mask_and_ack(ioaddr); 1530 sis190_irq_mask_and_ack(ioaddr);
1506 1531
@@ -1530,9 +1555,8 @@ static void sis190_tx_timeout(struct net_device *dev)
1530 if (tmp8 & CmdTxEnb) 1555 if (tmp8 & CmdTxEnb)
1531 SIS_W8(TxControl, tmp8 & ~CmdTxEnb); 1556 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1532 1557
1533 1558 netif_info(tp, tx_err, dev, "Transmit timeout, status %08x %08x\n",
1534 net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n", 1559 SIS_R32(TxControl), SIS_R32(TxSts));
1535 dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
1536 1560
1537 /* Disable interrupts by clearing the interrupt mask. */ 1561 /* Disable interrupts by clearing the interrupt mask. */
1538 SIS_W32(IntrMask, 0x0000); 1562 SIS_W32(IntrMask, 0x0000);
@@ -1561,15 +1585,16 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1561 u16 sig; 1585 u16 sig;
1562 int i; 1586 int i;
1563 1587
1564 net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n", 1588 if (netif_msg_probe(tp))
1565 pci_name(pdev)); 1589 pr_info("%s: Read MAC address from EEPROM\n", pci_name(pdev));
1566 1590
1567 /* Check to see if there is a sane EEPROM */ 1591 /* Check to see if there is a sane EEPROM */
1568 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature); 1592 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1569 1593
1570 if ((sig == 0xffff) || (sig == 0x0000)) { 1594 if ((sig == 0xffff) || (sig == 0x0000)) {
1571 net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n", 1595 if (netif_msg_probe(tp))
1572 pci_name(pdev), sig); 1596 pr_info("%s: Error EEPROM read %x\n",
1597 pci_name(pdev), sig);
1573 return -EIO; 1598 return -EIO;
1574 } 1599 }
1575 1600
@@ -1603,8 +1628,8 @@ static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1603 u8 reg, tmp8; 1628 u8 reg, tmp8;
1604 unsigned int i; 1629 unsigned int i;
1605 1630
1606 net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n", 1631 if (netif_msg_probe(tp))
1607 pci_name(pdev)); 1632 pr_info("%s: Read MAC address from APC\n", pci_name(pdev));
1608 1633
1609 for (i = 0; i < ARRAY_SIZE(ids); i++) { 1634 for (i = 0; i < ARRAY_SIZE(ids); i++) {
1610 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL); 1635 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
@@ -1613,8 +1638,9 @@ static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1613 } 1638 }
1614 1639
1615 if (!isa_bridge) { 1640 if (!isa_bridge) {
1616 net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n", 1641 if (netif_msg_probe(tp))
1617 pci_name(pdev)); 1642 pr_info("%s: Can not find ISA bridge\n",
1643 pci_name(pdev));
1618 return -EIO; 1644 return -EIO;
1619 } 1645 }
1620 1646
@@ -1695,7 +1721,7 @@ static void sis190_set_speed_auto(struct net_device *dev)
1695 int phy_id = tp->mii_if.phy_id; 1721 int phy_id = tp->mii_if.phy_id;
1696 int val; 1722 int val;
1697 1723
1698 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name); 1724 netif_info(tp, link, dev, "Enabling Auto-negotiation\n");
1699 1725
1700 val = mdio_read(ioaddr, phy_id, MII_ADVERTISE); 1726 val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1701 1727
@@ -1822,7 +1848,8 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
1822 int rc; 1848 int rc;
1823 1849
1824 if (!printed_version) { 1850 if (!printed_version) {
1825 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n"); 1851 if (netif_msg_drv(&debug))
1852 pr_info(SIS190_DRIVER_NAME " loaded\n");
1826 printed_version = 1; 1853 printed_version = 1;
1827 } 1854 }
1828 1855
@@ -1862,12 +1889,14 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
1862 if (rc < 0) 1889 if (rc < 0)
1863 goto err_remove_mii; 1890 goto err_remove_mii;
1864 1891
1865 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), %pM\n", 1892 if (netif_msg_probe(tp)) {
1866 pci_name(pdev), sis_chip_info[ent->driver_data].name, 1893 netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
1867 ioaddr, dev->irq, dev->dev_addr); 1894 pci_name(pdev),
1868 1895 sis_chip_info[ent->driver_data].name,
1869 net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name, 1896 ioaddr, dev->irq, dev->dev_addr);
1870 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII"); 1897 netdev_info(dev, "%s mode.\n",
1898 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
1899 }
1871 1900
1872 netif_carrier_off(dev); 1901 netif_carrier_off(dev);
1873 1902
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 7360d4bbf75e..cc0c731c4f09 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -106,7 +106,7 @@ static const char * card_names[] = {
106 "SiS 900 PCI Fast Ethernet", 106 "SiS 900 PCI Fast Ethernet",
107 "SiS 7016 PCI Fast Ethernet" 107 "SiS 7016 PCI Fast Ethernet"
108}; 108};
109static struct pci_device_id sis900_pci_tbl [] = { 109static DEFINE_PCI_DEVICE_TABLE(sis900_pci_tbl) = {
110 {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_900, 110 {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_900,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_900}, 111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_900},
112 {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7016, 112 {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7016,
@@ -2288,7 +2288,7 @@ static void set_rx_mode(struct net_device *net_dev)
2288 rx_mode = RFPromiscuous; 2288 rx_mode = RFPromiscuous;
2289 for (i = 0; i < table_entries; i++) 2289 for (i = 0; i < table_entries; i++)
2290 mc_filter[i] = 0xffff; 2290 mc_filter[i] = 0xffff;
2291 } else if ((net_dev->mc_count > multicast_filter_limit) || 2291 } else if ((netdev_mc_count(net_dev) > multicast_filter_limit) ||
2292 (net_dev->flags & IFF_ALLMULTI)) { 2292 (net_dev->flags & IFF_ALLMULTI)) {
2293 /* too many multicast addresses or accept all multicast packet */ 2293 /* too many multicast addresses or accept all multicast packet */
2294 rx_mode = RFAAB | RFAAM; 2294 rx_mode = RFAAB | RFAAM;
@@ -2300,9 +2300,8 @@ static void set_rx_mode(struct net_device *net_dev)
2300 * packets */ 2300 * packets */
2301 struct dev_mc_list *mclist; 2301 struct dev_mc_list *mclist;
2302 rx_mode = RFAAB; 2302 rx_mode = RFAAB;
2303 for (i = 0, mclist = net_dev->mc_list; 2303
2304 mclist && i < net_dev->mc_count; 2304 netdev_for_each_mc_addr(mclist, net_dev) {
2305 i++, mclist = mclist->next) {
2306 unsigned int bit_nr = 2305 unsigned int bit_nr =
2307 sis900_mcast_bitnr(mclist->dmi_addr, sis_priv->chipset_rev); 2306 sis900_mcast_bitnr(mclist->dmi_addr, sis_priv->chipset_rev);
2308 mc_filter[bit_nr >> 4] |= (1 << (bit_nr & 0xf)); 2307 mc_filter[bit_nr >> 4] |= (1 << (bit_nr & 0xf));
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index db216a728503..1921a54ea995 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -149,7 +149,7 @@ extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
149extern void mac_drv_clear_rx_queue(struct s_smc *smc); 149extern void mac_drv_clear_rx_queue(struct s_smc *smc);
150extern void enable_tx_irq(struct s_smc *smc, u_short queue); 150extern void enable_tx_irq(struct s_smc *smc, u_short queue);
151 151
152static struct pci_device_id skfddi_pci_tbl[] = { 152static DEFINE_PCI_DEVICE_TABLE(skfddi_pci_tbl) = {
153 { PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, }, 153 { PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
154 { } /* Terminating entry */ 154 { } /* Terminating entry */
155}; 155};
@@ -435,13 +435,7 @@ static int skfp_driver_init(struct net_device *dev)
435 goto fail; 435 goto fail;
436 } 436 }
437 read_address(smc, NULL); 437 read_address(smc, NULL);
438 pr_debug(KERN_INFO "HW-Addr: %02x %02x %02x %02x %02x %02x\n", 438 pr_debug(KERN_INFO "HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
439 smc->hw.fddi_canon_addr.a[0],
440 smc->hw.fddi_canon_addr.a[1],
441 smc->hw.fddi_canon_addr.a[2],
442 smc->hw.fddi_canon_addr.a[3],
443 smc->hw.fddi_canon_addr.a[4],
444 smc->hw.fddi_canon_addr.a[5]);
445 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6); 439 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
446 440
447 smt_reset_defaults(smc, 0); 441 smt_reset_defaults(smc, 0);
@@ -858,8 +852,7 @@ static void skfp_ctl_set_multicast_list(struct net_device *dev)
858static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev) 852static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
859{ 853{
860 struct s_smc *smc = netdev_priv(dev); 854 struct s_smc *smc = netdev_priv(dev);
861 struct dev_mc_list *dmi; /* ptr to multicast addr entry */ 855 struct dev_mc_list *dmi;
862 int i;
863 856
864 /* Enable promiscuous mode, if necessary */ 857 /* Enable promiscuous mode, if necessary */
865 if (dev->flags & IFF_PROMISC) { 858 if (dev->flags & IFF_PROMISC) {
@@ -878,29 +871,19 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
878 if (dev->flags & IFF_ALLMULTI) { 871 if (dev->flags & IFF_ALLMULTI) {
879 mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI); 872 mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
880 pr_debug(KERN_INFO "ENABLE ALL MC ADDRESSES\n"); 873 pr_debug(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
881 } else if (dev->mc_count > 0) { 874 } else if (!netdev_mc_empty(dev)) {
882 if (dev->mc_count <= FPMAX_MULTICAST) { 875 if (netdev_mc_count(dev) <= FPMAX_MULTICAST) {
883 /* use exact filtering */ 876 /* use exact filtering */
884 877
885 // point to first multicast addr 878 // point to first multicast addr
886 dmi = dev->mc_list; 879 netdev_for_each_mc_addr(dmi, dev) {
887
888 for (i = 0; i < dev->mc_count; i++) {
889 mac_add_multicast(smc, 880 mac_add_multicast(smc,
890 (struct fddi_addr *)dmi->dmi_addr, 881 (struct fddi_addr *)dmi->dmi_addr,
891 1); 882 1);
892 883
893 pr_debug(KERN_INFO "ENABLE MC ADDRESS:"); 884 pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n",
894 pr_debug(" %02x %02x %02x ", 885 dmi->dmi_addr);
895 dmi->dmi_addr[0], 886 }
896 dmi->dmi_addr[1],
897 dmi->dmi_addr[2]);
898 pr_debug("%02x %02x %02x\n",
899 dmi->dmi_addr[3],
900 dmi->dmi_addr[4],
901 dmi->dmi_addr[5]);
902 dmi = dmi->next;
903 } // for
904 887
905 } else { // more MC addresses than HW supports 888 } else { // more MC addresses than HW supports
906 889
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 379a3dc00163..d0058e5bb6ae 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -23,6 +23,8 @@
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */ 24 */
25 25
26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27
26#include <linux/in.h> 28#include <linux/in.h>
27#include <linux/kernel.h> 29#include <linux/kernel.h>
28#include <linux/module.h> 30#include <linux/module.h>
@@ -46,7 +48,6 @@
46 48
47#define DRV_NAME "skge" 49#define DRV_NAME "skge"
48#define DRV_VERSION "1.13" 50#define DRV_VERSION "1.13"
49#define PFX DRV_NAME " "
50 51
51#define DEFAULT_TX_RING_SIZE 128 52#define DEFAULT_TX_RING_SIZE 128
52#define DEFAULT_RX_RING_SIZE 512 53#define DEFAULT_RX_RING_SIZE 512
@@ -70,15 +71,15 @@ MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
70MODULE_LICENSE("GPL"); 71MODULE_LICENSE("GPL");
71MODULE_VERSION(DRV_VERSION); 72MODULE_VERSION(DRV_VERSION);
72 73
73static const u32 default_msg 74static const u32 default_msg = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
74 = NETIF_MSG_DRV| NETIF_MSG_PROBE| NETIF_MSG_LINK 75 NETIF_MSG_LINK | NETIF_MSG_IFUP |
75 | NETIF_MSG_IFUP| NETIF_MSG_IFDOWN; 76 NETIF_MSG_IFDOWN);
76 77
77static int debug = -1; /* defaults above */ 78static int debug = -1; /* defaults above */
78module_param(debug, int, 0); 79module_param(debug, int, 0);
79MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 80MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
80 81
81static const struct pci_device_id skge_id_table[] = { 82static DEFINE_PCI_DEVICE_TABLE(skge_id_table) = {
82 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) }, 83 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) },
83 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) }, 84 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
84 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) }, 85 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
@@ -187,8 +188,8 @@ static void skge_wol_init(struct skge_port *skge)
187 188
188 /* Force to 10/100 skge_reset will re-enable on resume */ 189 /* Force to 10/100 skge_reset will re-enable on resume */
189 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, 190 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
190 PHY_AN_100FULL | PHY_AN_100HALF | 191 (PHY_AN_100FULL | PHY_AN_100HALF |
191 PHY_AN_10FULL | PHY_AN_10HALF| PHY_AN_CSMA); 192 PHY_AN_10FULL | PHY_AN_10HALF | PHY_AN_CSMA));
192 /* no 1000 HD/FD */ 193 /* no 1000 HD/FD */
193 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0); 194 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0);
194 gm_phy_write(hw, port, PHY_MARV_CTRL, 195 gm_phy_write(hw, port, PHY_MARV_CTRL,
@@ -257,25 +258,28 @@ static u32 skge_supported_modes(const struct skge_hw *hw)
257 u32 supported; 258 u32 supported;
258 259
259 if (hw->copper) { 260 if (hw->copper) {
260 supported = SUPPORTED_10baseT_Half 261 supported = (SUPPORTED_10baseT_Half |
261 | SUPPORTED_10baseT_Full 262 SUPPORTED_10baseT_Full |
262 | SUPPORTED_100baseT_Half 263 SUPPORTED_100baseT_Half |
263 | SUPPORTED_100baseT_Full 264 SUPPORTED_100baseT_Full |
264 | SUPPORTED_1000baseT_Half 265 SUPPORTED_1000baseT_Half |
265 | SUPPORTED_1000baseT_Full 266 SUPPORTED_1000baseT_Full |
266 | SUPPORTED_Autoneg| SUPPORTED_TP; 267 SUPPORTED_Autoneg |
268 SUPPORTED_TP);
267 269
268 if (hw->chip_id == CHIP_ID_GENESIS) 270 if (hw->chip_id == CHIP_ID_GENESIS)
269 supported &= ~(SUPPORTED_10baseT_Half 271 supported &= ~(SUPPORTED_10baseT_Half |
270 | SUPPORTED_10baseT_Full 272 SUPPORTED_10baseT_Full |
271 | SUPPORTED_100baseT_Half 273 SUPPORTED_100baseT_Half |
272 | SUPPORTED_100baseT_Full); 274 SUPPORTED_100baseT_Full);
273 275
274 else if (hw->chip_id == CHIP_ID_YUKON) 276 else if (hw->chip_id == CHIP_ID_YUKON)
275 supported &= ~SUPPORTED_1000baseT_Half; 277 supported &= ~SUPPORTED_1000baseT_Half;
276 } else 278 } else
277 supported = SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half 279 supported = (SUPPORTED_1000baseT_Full |
278 | SUPPORTED_FIBRE | SUPPORTED_Autoneg; 280 SUPPORTED_1000baseT_Half |
281 SUPPORTED_FIBRE |
282 SUPPORTED_Autoneg);
279 283
280 return supported; 284 return supported;
281} 285}
@@ -365,7 +369,7 @@ static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
365 } 369 }
366 } 370 }
367 371
368 return (0); 372 return 0;
369} 373}
370 374
371static void skge_get_drvinfo(struct net_device *dev, 375static void skge_get_drvinfo(struct net_device *dev,
@@ -812,7 +816,7 @@ static int skge_get_eeprom_len(struct net_device *dev)
812 u32 reg2; 816 u32 reg2;
813 817
814 pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2); 818 pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2);
815 return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); 819 return 1 << (((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
816} 820}
817 821
818static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset) 822static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset)
@@ -1043,7 +1047,7 @@ static int skge_rx_fill(struct net_device *dev)
1043 1047
1044 skb_reserve(skb, NET_IP_ALIGN); 1048 skb_reserve(skb, NET_IP_ALIGN);
1045 skge_rx_setup(skge, e, skb, skge->rx_buf_size); 1049 skge_rx_setup(skge, e, skb, skge->rx_buf_size);
1046 } while ( (e = e->next) != ring->start); 1050 } while ((e = e->next) != ring->start);
1047 1051
1048 ring->to_clean = ring->start; 1052 ring->to_clean = ring->start;
1049 return 0; 1053 return 0;
@@ -1051,7 +1055,7 @@ static int skge_rx_fill(struct net_device *dev)
1051 1055
1052static const char *skge_pause(enum pause_status status) 1056static const char *skge_pause(enum pause_status status)
1053{ 1057{
1054 switch(status) { 1058 switch (status) {
1055 case FLOW_STAT_NONE: 1059 case FLOW_STAT_NONE:
1056 return "none"; 1060 return "none";
1057 case FLOW_STAT_REM_SEND: 1061 case FLOW_STAT_REM_SEND:
@@ -1074,13 +1078,11 @@ static void skge_link_up(struct skge_port *skge)
1074 netif_carrier_on(skge->netdev); 1078 netif_carrier_on(skge->netdev);
1075 netif_wake_queue(skge->netdev); 1079 netif_wake_queue(skge->netdev);
1076 1080
1077 if (netif_msg_link(skge)) { 1081 netif_info(skge, link, skge->netdev,
1078 printk(KERN_INFO PFX 1082 "Link is up at %d Mbps, %s duplex, flow control %s\n",
1079 "%s: Link is up at %d Mbps, %s duplex, flow control %s\n", 1083 skge->speed,
1080 skge->netdev->name, skge->speed, 1084 skge->duplex == DUPLEX_FULL ? "full" : "half",
1081 skge->duplex == DUPLEX_FULL ? "full" : "half", 1085 skge_pause(skge->flow_status));
1082 skge_pause(skge->flow_status));
1083 }
1084} 1086}
1085 1087
1086static void skge_link_down(struct skge_port *skge) 1088static void skge_link_down(struct skge_port *skge)
@@ -1089,8 +1091,7 @@ static void skge_link_down(struct skge_port *skge)
1089 netif_carrier_off(skge->netdev); 1091 netif_carrier_off(skge->netdev);
1090 netif_stop_queue(skge->netdev); 1092 netif_stop_queue(skge->netdev);
1091 1093
1092 if (netif_msg_link(skge)) 1094 netif_info(skge, link, skge->netdev, "Link is down\n");
1093 printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name);
1094} 1095}
1095 1096
1096 1097
@@ -1132,8 +1133,7 @@ static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
1132{ 1133{
1133 u16 v = 0; 1134 u16 v = 0;
1134 if (__xm_phy_read(hw, port, reg, &v)) 1135 if (__xm_phy_read(hw, port, reg, &v))
1135 printk(KERN_WARNING PFX "%s: phy read timed out\n", 1136 pr_warning("%s: phy read timed out\n", hw->dev[port]->name);
1136 hw->dev[port]->name);
1137 return v; 1137 return v;
1138} 1138}
1139 1139
@@ -1255,8 +1255,7 @@ static void bcom_check_link(struct skge_hw *hw, int port)
1255 1255
1256 lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); 1256 lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
1257 if (lpa & PHY_B_AN_RF) { 1257 if (lpa & PHY_B_AN_RF) {
1258 printk(KERN_NOTICE PFX "%s: remote fault\n", 1258 netdev_notice(dev, "remote fault\n");
1259 dev->name);
1260 return; 1259 return;
1261 } 1260 }
1262 1261
@@ -1271,8 +1270,7 @@ static void bcom_check_link(struct skge_hw *hw, int port)
1271 skge->duplex = DUPLEX_HALF; 1270 skge->duplex = DUPLEX_HALF;
1272 break; 1271 break;
1273 default: 1272 default:
1274 printk(KERN_NOTICE PFX "%s: duplex mismatch\n", 1273 netdev_notice(dev, "duplex mismatch\n");
1275 dev->name);
1276 return; 1274 return;
1277 } 1275 }
1278 1276
@@ -1327,7 +1325,7 @@ static void bcom_phy_init(struct skge_port *skge)
1327 /* Optimize MDIO transfer by suppressing preamble. */ 1325 /* Optimize MDIO transfer by suppressing preamble. */
1328 r = xm_read16(hw, port, XM_MMU_CMD); 1326 r = xm_read16(hw, port, XM_MMU_CMD);
1329 r |= XM_MMU_NO_PRE; 1327 r |= XM_MMU_NO_PRE;
1330 xm_write16(hw, port, XM_MMU_CMD,r); 1328 xm_write16(hw, port, XM_MMU_CMD, r);
1331 1329
1332 switch (id1) { 1330 switch (id1) {
1333 case PHY_BCOM_ID1_C0: 1331 case PHY_BCOM_ID1_C0:
@@ -1464,8 +1462,7 @@ static int xm_check_link(struct net_device *dev)
1464 1462
1465 lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); 1463 lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
1466 if (lpa & PHY_B_AN_RF) { 1464 if (lpa & PHY_B_AN_RF) {
1467 printk(KERN_NOTICE PFX "%s: remote fault\n", 1465 netdev_notice(dev, "remote fault\n");
1468 dev->name);
1469 return 0; 1466 return 0;
1470 } 1467 }
1471 1468
@@ -1480,8 +1477,7 @@ static int xm_check_link(struct net_device *dev)
1480 skge->duplex = DUPLEX_HALF; 1477 skge->duplex = DUPLEX_HALF;
1481 break; 1478 break;
1482 default: 1479 default:
1483 printk(KERN_NOTICE PFX "%s: duplex mismatch\n", 1480 netdev_notice(dev, "duplex mismatch\n");
1484 dev->name);
1485 return 0; 1481 return 0;
1486 } 1482 }
1487 1483
@@ -1519,7 +1515,7 @@ static void xm_link_timer(unsigned long arg)
1519{ 1515{
1520 struct skge_port *skge = (struct skge_port *) arg; 1516 struct skge_port *skge = (struct skge_port *) arg;
1521 struct net_device *dev = skge->netdev; 1517 struct net_device *dev = skge->netdev;
1522 struct skge_hw *hw = skge->hw; 1518 struct skge_hw *hw = skge->hw;
1523 int port = skge->port; 1519 int port = skge->port;
1524 int i; 1520 int i;
1525 unsigned long flags; 1521 unsigned long flags;
@@ -1538,7 +1534,7 @@ static void xm_link_timer(unsigned long arg)
1538 goto link_down; 1534 goto link_down;
1539 } 1535 }
1540 1536
1541 /* Re-enable interrupt to detect link down */ 1537 /* Re-enable interrupt to detect link down */
1542 if (xm_check_link(dev)) { 1538 if (xm_check_link(dev)) {
1543 u16 msk = xm_read16(hw, port, XM_IMSK); 1539 u16 msk = xm_read16(hw, port, XM_IMSK);
1544 msk &= ~XM_IS_INP_ASS; 1540 msk &= ~XM_IS_INP_ASS;
@@ -1569,7 +1565,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1569 udelay(1); 1565 udelay(1);
1570 } 1566 }
1571 1567
1572 printk(KERN_WARNING PFX "%s: genesis reset failed\n", dev->name); 1568 netdev_warn(dev, "genesis reset failed\n");
1573 1569
1574 reset_ok: 1570 reset_ok:
1575 /* Unreset the XMAC. */ 1571 /* Unreset the XMAC. */
@@ -1595,7 +1591,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1595 } 1591 }
1596 1592
1597 1593
1598 switch(hw->phy_type) { 1594 switch (hw->phy_type) {
1599 case SK_PHY_XMAC: 1595 case SK_PHY_XMAC:
1600 xm_phy_init(skge); 1596 xm_phy_init(skge);
1601 break; 1597 break;
@@ -1702,7 +1698,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1702 1698
1703 if (jumbo) { 1699 if (jumbo) {
1704 /* Enable frame flushing if jumbo frames used */ 1700 /* Enable frame flushing if jumbo frames used */
1705 skge_write16(hw, SK_REG(port,RX_MFF_CTRL1), MFF_ENA_FLUSH); 1701 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH);
1706 } else { 1702 } else {
1707 /* enable timeout timers if normal frames */ 1703 /* enable timeout timers if normal frames */
1708 skge_write16(hw, B3_PA_CTRL, 1704 skge_write16(hw, B3_PA_CTRL,
@@ -1717,7 +1713,7 @@ static void genesis_stop(struct skge_port *skge)
1717 unsigned retries = 1000; 1713 unsigned retries = 1000;
1718 u16 cmd; 1714 u16 cmd;
1719 1715
1720 /* Disable Tx and Rx */ 1716 /* Disable Tx and Rx */
1721 cmd = xm_read16(hw, port, XM_MMU_CMD); 1717 cmd = xm_read16(hw, port, XM_MMU_CMD);
1722 cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); 1718 cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
1723 xm_write16(hw, port, XM_MMU_CMD, cmd); 1719 xm_write16(hw, port, XM_MMU_CMD, cmd);
@@ -1792,12 +1788,11 @@ static void genesis_mac_intr(struct skge_hw *hw, int port)
1792 struct skge_port *skge = netdev_priv(dev); 1788 struct skge_port *skge = netdev_priv(dev);
1793 u16 status = xm_read16(hw, port, XM_ISRC); 1789 u16 status = xm_read16(hw, port, XM_ISRC);
1794 1790
1795 if (netif_msg_intr(skge)) 1791 netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
1796 printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n", 1792 "mac interrupt status 0x%x\n", status);
1797 dev->name, status);
1798 1793
1799 if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) { 1794 if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) {
1800 xm_link_down(hw, port); 1795 xm_link_down(hw, port);
1801 mod_timer(&skge->link_timer, jiffies + 1); 1796 mod_timer(&skge->link_timer, jiffies + 1);
1802 } 1797 }
1803 1798
@@ -1831,7 +1826,7 @@ static void genesis_link_up(struct skge_port *skge)
1831 xm_write16(hw, port, XM_MMU_CMD, cmd); 1826 xm_write16(hw, port, XM_MMU_CMD, cmd);
1832 1827
1833 mode = xm_read32(hw, port, XM_MODE); 1828 mode = xm_read32(hw, port, XM_MODE);
1834 if (skge->flow_status== FLOW_STAT_SYMMETRIC || 1829 if (skge->flow_status == FLOW_STAT_SYMMETRIC ||
1835 skge->flow_status == FLOW_STAT_LOC_SEND) { 1830 skge->flow_status == FLOW_STAT_LOC_SEND) {
1836 /* 1831 /*
1837 * Configure Pause Frame Generation 1832 * Configure Pause Frame Generation
@@ -1898,12 +1893,11 @@ static inline void bcom_phy_intr(struct skge_port *skge)
1898 u16 isrc; 1893 u16 isrc;
1899 1894
1900 isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT); 1895 isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
1901 if (netif_msg_intr(skge)) 1896 netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
1902 printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x\n", 1897 "phy interrupt status 0x%x\n", isrc);
1903 skge->netdev->name, isrc);
1904 1898
1905 if (isrc & PHY_B_IS_PSE) 1899 if (isrc & PHY_B_IS_PSE)
1906 printk(KERN_ERR PFX "%s: uncorrectable pair swap error\n", 1900 pr_err("%s: uncorrectable pair swap error\n",
1907 hw->dev[port]->name); 1901 hw->dev[port]->name);
1908 1902
1909 /* Workaround BCom Errata: 1903 /* Workaround BCom Errata:
@@ -1936,8 +1930,7 @@ static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
1936 return 0; 1930 return 0;
1937 } 1931 }
1938 1932
1939 printk(KERN_WARNING PFX "%s: phy write timeout\n", 1933 pr_warning("%s: phy write timeout\n", hw->dev[port]->name);
1940 hw->dev[port]->name);
1941 return -EIO; 1934 return -EIO;
1942} 1935}
1943 1936
@@ -1965,8 +1958,7 @@ static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
1965{ 1958{
1966 u16 v = 0; 1959 u16 v = 0;
1967 if (__gm_phy_read(hw, port, reg, &v)) 1960 if (__gm_phy_read(hw, port, reg, &v))
1968 printk(KERN_WARNING PFX "%s: phy read timeout\n", 1961 pr_warning("%s: phy read timeout\n", hw->dev[port]->name);
1969 hw->dev[port]->name);
1970 return v; 1962 return v;
1971} 1963}
1972 1964
@@ -2298,9 +2290,8 @@ static void yukon_mac_intr(struct skge_hw *hw, int port)
2298 struct skge_port *skge = netdev_priv(dev); 2290 struct skge_port *skge = netdev_priv(dev);
2299 u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC)); 2291 u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
2300 2292
2301 if (netif_msg_intr(skge)) 2293 netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
2302 printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n", 2294 "mac interrupt status 0x%x\n", status);
2303 dev->name, status);
2304 2295
2305 if (status & GM_IS_RX_FF_OR) { 2296 if (status & GM_IS_RX_FF_OR) {
2306 ++dev->stats.rx_fifo_errors; 2297 ++dev->stats.rx_fifo_errors;
@@ -2379,9 +2370,8 @@ static void yukon_phy_intr(struct skge_port *skge)
2379 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT); 2370 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
2380 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT); 2371 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
2381 2372
2382 if (netif_msg_intr(skge)) 2373 netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
2383 printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x 0x%x\n", 2374 "phy interrupt status 0x%x 0x%x\n", istatus, phystat);
2384 skge->netdev->name, istatus, phystat);
2385 2375
2386 if (istatus & PHY_M_IS_AN_COMPL) { 2376 if (istatus & PHY_M_IS_AN_COMPL) {
2387 if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP) 2377 if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
@@ -2441,8 +2431,7 @@ static void yukon_phy_intr(struct skge_port *skge)
2441 } 2431 }
2442 return; 2432 return;
2443 failed: 2433 failed:
2444 printk(KERN_ERR PFX "%s: autonegotiation failed (%s)\n", 2434 pr_err("%s: autonegotiation failed (%s)\n", skge->netdev->name, reason);
2445 skge->netdev->name, reason);
2446 2435
2447 /* XXX restart autonegotiation? */ 2436 /* XXX restart autonegotiation? */
2448} 2437}
@@ -2480,7 +2469,7 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2480 if (!netif_running(dev)) 2469 if (!netif_running(dev))
2481 return -ENODEV; /* Phy still in reset */ 2470 return -ENODEV; /* Phy still in reset */
2482 2471
2483 switch(cmd) { 2472 switch (cmd) {
2484 case SIOCGMIIPHY: 2473 case SIOCGMIIPHY:
2485 data->phy_id = hw->phy_addr; 2474 data->phy_id = hw->phy_addr;
2486 2475
@@ -2571,8 +2560,7 @@ static int skge_up(struct net_device *dev)
2571 if (!is_valid_ether_addr(dev->dev_addr)) 2560 if (!is_valid_ether_addr(dev->dev_addr))
2572 return -EINVAL; 2561 return -EINVAL;
2573 2562
2574 if (netif_msg_ifup(skge)) 2563 netif_info(skge, ifup, skge->netdev, "enabling interface\n");
2575 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
2576 2564
2577 if (dev->mtu > RX_BUF_SIZE) 2565 if (dev->mtu > RX_BUF_SIZE)
2578 skge->rx_buf_size = dev->mtu + ETH_HLEN; 2566 skge->rx_buf_size = dev->mtu + ETH_HLEN;
@@ -2670,8 +2658,7 @@ static int skge_down(struct net_device *dev)
2670 if (skge->mem == NULL) 2658 if (skge->mem == NULL)
2671 return 0; 2659 return 0;
2672 2660
2673 if (netif_msg_ifdown(skge)) 2661 netif_info(skge, ifdown, skge->netdev, "disabling interface\n");
2674 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
2675 2662
2676 netif_tx_disable(dev); 2663 netif_tx_disable(dev);
2677 2664
@@ -2781,7 +2768,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
2781 * does. Looks like hardware is wrong? 2768 * does. Looks like hardware is wrong?
2782 */ 2769 */
2783 if (ipip_hdr(skb)->protocol == IPPROTO_UDP && 2770 if (ipip_hdr(skb)->protocol == IPPROTO_UDP &&
2784 hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON) 2771 hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
2785 control = BMU_TCP_CHECK; 2772 control = BMU_TCP_CHECK;
2786 else 2773 else
2787 control = BMU_UDP_CHECK; 2774 control = BMU_UDP_CHECK;
@@ -2793,7 +2780,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
2793 control = BMU_CHECK; 2780 control = BMU_CHECK;
2794 2781
2795 if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */ 2782 if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */
2796 control |= BMU_EOF| BMU_IRQ_EOF; 2783 control |= BMU_EOF | BMU_IRQ_EOF;
2797 else { 2784 else {
2798 struct skge_tx_desc *tf = td; 2785 struct skge_tx_desc *tf = td;
2799 2786
@@ -2825,15 +2812,15 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
2825 2812
2826 skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START); 2813 skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
2827 2814
2828 if (unlikely(netif_msg_tx_queued(skge))) 2815 netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
2829 printk(KERN_DEBUG "%s: tx queued, slot %td, len %d\n", 2816 "tx queued, slot %td, len %d\n",
2830 dev->name, e - skge->tx_ring.start, skb->len); 2817 e - skge->tx_ring.start, skb->len);
2831 2818
2832 skge->tx_ring.to_use = e->next; 2819 skge->tx_ring.to_use = e->next;
2833 smp_wmb(); 2820 smp_wmb();
2834 2821
2835 if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) { 2822 if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
2836 pr_debug("%s: transmit queue full\n", dev->name); 2823 netdev_dbg(dev, "transmit queue full\n");
2837 netif_stop_queue(dev); 2824 netif_stop_queue(dev);
2838 } 2825 }
2839 2826
@@ -2858,9 +2845,8 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
2858 PCI_DMA_TODEVICE); 2845 PCI_DMA_TODEVICE);
2859 2846
2860 if (control & BMU_EOF) { 2847 if (control & BMU_EOF) {
2861 if (unlikely(netif_msg_tx_done(skge))) 2848 netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
2862 printk(KERN_DEBUG PFX "%s: tx done slot %td\n", 2849 "tx done slot %td\n", e - skge->tx_ring.start);
2863 skge->netdev->name, e - skge->tx_ring.start);
2864 2850
2865 dev_kfree_skb(e->skb); 2851 dev_kfree_skb(e->skb);
2866 } 2852 }
@@ -2885,8 +2871,7 @@ static void skge_tx_timeout(struct net_device *dev)
2885{ 2871{
2886 struct skge_port *skge = netdev_priv(dev); 2872 struct skge_port *skge = netdev_priv(dev);
2887 2873
2888 if (netif_msg_timer(skge)) 2874 netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n");
2889 printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name);
2890 2875
2891 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP); 2876 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
2892 skge_tx_clean(dev); 2877 skge_tx_clean(dev);
@@ -2932,8 +2917,7 @@ static void genesis_set_multicast(struct net_device *dev)
2932 struct skge_port *skge = netdev_priv(dev); 2917 struct skge_port *skge = netdev_priv(dev);
2933 struct skge_hw *hw = skge->hw; 2918 struct skge_hw *hw = skge->hw;
2934 int port = skge->port; 2919 int port = skge->port;
2935 int i, count = dev->mc_count; 2920 struct dev_mc_list *list;
2936 struct dev_mc_list *list = dev->mc_list;
2937 u32 mode; 2921 u32 mode;
2938 u8 filter[8]; 2922 u8 filter[8];
2939 2923
@@ -2953,7 +2937,7 @@ static void genesis_set_multicast(struct net_device *dev)
2953 skge->flow_status == FLOW_STAT_SYMMETRIC) 2937 skge->flow_status == FLOW_STAT_SYMMETRIC)
2954 genesis_add_filter(filter, pause_mc_addr); 2938 genesis_add_filter(filter, pause_mc_addr);
2955 2939
2956 for (i = 0; list && i < count; i++, list = list->next) 2940 netdev_for_each_mc_addr(list, dev)
2957 genesis_add_filter(filter, list->dmi_addr); 2941 genesis_add_filter(filter, list->dmi_addr);
2958 } 2942 }
2959 2943
@@ -2972,7 +2956,7 @@ static void yukon_set_multicast(struct net_device *dev)
2972 struct skge_port *skge = netdev_priv(dev); 2956 struct skge_port *skge = netdev_priv(dev);
2973 struct skge_hw *hw = skge->hw; 2957 struct skge_hw *hw = skge->hw;
2974 int port = skge->port; 2958 int port = skge->port;
2975 struct dev_mc_list *list = dev->mc_list; 2959 struct dev_mc_list *list;
2976 int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND || 2960 int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND ||
2977 skge->flow_status == FLOW_STAT_SYMMETRIC); 2961 skge->flow_status == FLOW_STAT_SYMMETRIC);
2978 u16 reg; 2962 u16 reg;
@@ -2987,16 +2971,15 @@ static void yukon_set_multicast(struct net_device *dev)
2987 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 2971 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
2988 else if (dev->flags & IFF_ALLMULTI) /* all multicast */ 2972 else if (dev->flags & IFF_ALLMULTI) /* all multicast */
2989 memset(filter, 0xff, sizeof(filter)); 2973 memset(filter, 0xff, sizeof(filter));
2990 else if (dev->mc_count == 0 && !rx_pause)/* no multicast */ 2974 else if (netdev_mc_empty(dev) && !rx_pause)/* no multicast */
2991 reg &= ~GM_RXCR_MCF_ENA; 2975 reg &= ~GM_RXCR_MCF_ENA;
2992 else { 2976 else {
2993 int i;
2994 reg |= GM_RXCR_MCF_ENA; 2977 reg |= GM_RXCR_MCF_ENA;
2995 2978
2996 if (rx_pause) 2979 if (rx_pause)
2997 yukon_add_filter(filter, pause_mc_addr); 2980 yukon_add_filter(filter, pause_mc_addr);
2998 2981
2999 for (i = 0; list && i < dev->mc_count; i++, list = list->next) 2982 netdev_for_each_mc_addr(list, dev)
3000 yukon_add_filter(filter, list->dmi_addr); 2983 yukon_add_filter(filter, list->dmi_addr);
3001 } 2984 }
3002 2985
@@ -3054,10 +3037,9 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3054 struct sk_buff *skb; 3037 struct sk_buff *skb;
3055 u16 len = control & BMU_BBC; 3038 u16 len = control & BMU_BBC;
3056 3039
3057 if (unlikely(netif_msg_rx_status(skge))) 3040 netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev,
3058 printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n", 3041 "rx slot %td status 0x%x len %d\n",
3059 dev->name, e - skge->rx_ring.start, 3042 e - skge->rx_ring.start, status, len);
3060 status, len);
3061 3043
3062 if (len > skge->rx_buf_size) 3044 if (len > skge->rx_buf_size)
3063 goto error; 3045 goto error;
@@ -3096,7 +3078,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3096 pci_unmap_len(e, maplen), 3078 pci_unmap_len(e, maplen),
3097 PCI_DMA_FROMDEVICE); 3079 PCI_DMA_FROMDEVICE);
3098 skb = e->skb; 3080 skb = e->skb;
3099 prefetch(skb->data); 3081 prefetch(skb->data);
3100 skge_rx_setup(skge, e, nskb, skge->rx_buf_size); 3082 skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
3101 } 3083 }
3102 3084
@@ -3111,10 +3093,9 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3111 return skb; 3093 return skb;
3112error: 3094error:
3113 3095
3114 if (netif_msg_rx_err(skge)) 3096 netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev,
3115 printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n", 3097 "rx err, slot %td control 0x%x status 0x%x\n",
3116 dev->name, e - skge->rx_ring.start, 3098 e - skge->rx_ring.start, control, status);
3117 control, status);
3118 3099
3119 if (skge->hw->chip_id == CHIP_ID_GENESIS) { 3100 if (skge->hw->chip_id == CHIP_ID_GENESIS) {
3120 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) 3101 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
@@ -3574,8 +3555,7 @@ static int skge_reset(struct skge_hw *hw)
3574 hw->ram_offset = 0x80000; 3555 hw->ram_offset = 0x80000;
3575 } else 3556 } else
3576 hw->ram_size = t8 * 512; 3557 hw->ram_size = t8 * 512;
3577 } 3558 } else if (t8 == 0)
3578 else if (t8 == 0)
3579 hw->ram_size = 0x20000; 3559 hw->ram_size = 0x20000;
3580 else 3560 else
3581 hw->ram_size = t8 * 4096; 3561 hw->ram_size = t8 * 4096;
@@ -3729,7 +3709,7 @@ static int skge_device_event(struct notifier_block *unused,
3729 goto done; 3709 goto done;
3730 3710
3731 skge = netdev_priv(dev); 3711 skge = netdev_priv(dev);
3732 switch(event) { 3712 switch (event) {
3733 case NETDEV_CHANGENAME: 3713 case NETDEV_CHANGENAME:
3734 if (skge->debugfs) { 3714 if (skge->debugfs) {
3735 d = debugfs_rename(skge_debug, skge->debugfs, 3715 d = debugfs_rename(skge_debug, skge->debugfs,
@@ -3737,7 +3717,7 @@ static int skge_device_event(struct notifier_block *unused,
3737 if (d) 3717 if (d)
3738 skge->debugfs = d; 3718 skge->debugfs = d;
3739 else { 3719 else {
3740 pr_info(PFX "%s: rename failed\n", dev->name); 3720 netdev_info(dev, "rename failed\n");
3741 debugfs_remove(skge->debugfs); 3721 debugfs_remove(skge->debugfs);
3742 } 3722 }
3743 } 3723 }
@@ -3755,8 +3735,7 @@ static int skge_device_event(struct notifier_block *unused,
3755 skge_debug, dev, 3735 skge_debug, dev,
3756 &skge_debug_fops); 3736 &skge_debug_fops);
3757 if (!d || IS_ERR(d)) 3737 if (!d || IS_ERR(d))
3758 pr_info(PFX "%s: debugfs create failed\n", 3738 netdev_info(dev, "debugfs create failed\n");
3759 dev->name);
3760 else 3739 else
3761 skge->debugfs = d; 3740 skge->debugfs = d;
3762 break; 3741 break;
@@ -3777,7 +3756,7 @@ static __init void skge_debug_init(void)
3777 3756
3778 ent = debugfs_create_dir("skge", NULL); 3757 ent = debugfs_create_dir("skge", NULL);
3779 if (!ent || IS_ERR(ent)) { 3758 if (!ent || IS_ERR(ent)) {
3780 pr_info(PFX "debugfs create directory failed\n"); 3759 pr_info("debugfs create directory failed\n");
3781 return; 3760 return;
3782 } 3761 }
3783 3762
@@ -3885,9 +3864,7 @@ static void __devinit skge_show_addr(struct net_device *dev)
3885{ 3864{
3886 const struct skge_port *skge = netdev_priv(dev); 3865 const struct skge_port *skge = netdev_priv(dev);
3887 3866
3888 if (netif_msg_probe(skge)) 3867 netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
3889 printk(KERN_INFO PFX "%s: addr %pM\n",
3890 dev->name, dev->dev_addr);
3891} 3868}
3892 3869
3893static int __devinit skge_probe(struct pci_dev *pdev, 3870static int __devinit skge_probe(struct pci_dev *pdev,
@@ -3937,7 +3914,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3937 3914
3938 err = -ENOMEM; 3915 err = -ENOMEM;
3939 /* space for skge@pci:0000:04:00.0 */ 3916 /* space for skge@pci:0000:04:00.0 */
3940 hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:" ) 3917 hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
3941 + strlen(pci_name(pdev)) + 1, GFP_KERNEL); 3918 + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
3942 if (!hw) { 3919 if (!hw) {
3943 dev_err(&pdev->dev, "cannot allocate hardware struct\n"); 3920 dev_err(&pdev->dev, "cannot allocate hardware struct\n");
@@ -3960,9 +3937,10 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3960 if (err) 3937 if (err)
3961 goto err_out_iounmap; 3938 goto err_out_iounmap;
3962 3939
3963 printk(KERN_INFO PFX DRV_VERSION " addr 0x%llx irq %d chip %s rev %d\n", 3940 pr_info("%s addr 0x%llx irq %d chip %s rev %d\n",
3964 (unsigned long long)pci_resource_start(pdev, 0), pdev->irq, 3941 DRV_VERSION,
3965 skge_board_name(hw), hw->chip_rev); 3942 (unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
3943 skge_board_name(hw), hw->chip_rev);
3966 3944
3967 dev = skge_devinit(hw, 0, using_dac); 3945 dev = skge_devinit(hw, 0, using_dac);
3968 if (!dev) 3946 if (!dev)
@@ -4032,7 +4010,8 @@ static void __devexit skge_remove(struct pci_dev *pdev)
4032 4010
4033 flush_scheduled_work(); 4011 flush_scheduled_work();
4034 4012
4035 if ((dev1 = hw->dev[1])) 4013 dev1 = hw->dev[1];
4014 if (dev1)
4036 unregister_netdev(dev1); 4015 unregister_netdev(dev1);
4037 dev0 = hw->dev[0]; 4016 dev0 = hw->dev[0];
4038 unregister_netdev(dev0); 4017 unregister_netdev(dev0);
@@ -4119,8 +4098,7 @@ static int skge_resume(struct pci_dev *pdev)
4119 err = skge_up(dev); 4098 err = skge_up(dev);
4120 4099
4121 if (err) { 4100 if (err) {
4122 printk(KERN_ERR PFX "%s: could not up: %d\n", 4101 netdev_err(dev, "could not up: %d\n", err);
4123 dev->name, err);
4124 dev_close(dev); 4102 dev_close(dev);
4125 goto out; 4103 goto out;
4126 } 4104 }
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 67249c3c9f50..653bdd76ef46 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -22,6 +22,8 @@
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */ 23 */
24 24
25#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
26
25#include <linux/crc32.h> 27#include <linux/crc32.h>
26#include <linux/kernel.h> 28#include <linux/kernel.h>
27#include <linux/module.h> 29#include <linux/module.h>
@@ -50,8 +52,7 @@
50#include "sky2.h" 52#include "sky2.h"
51 53
52#define DRV_NAME "sky2" 54#define DRV_NAME "sky2"
53#define DRV_VERSION "1.26" 55#define DRV_VERSION "1.27"
54#define PFX DRV_NAME " "
55 56
56/* 57/*
57 * The Yukon II chipset takes 64 bit command blocks (called list elements) 58 * The Yukon II chipset takes 64 bit command blocks (called list elements)
@@ -251,6 +252,8 @@ static void sky2_power_on(struct sky2_hw *hw)
251 252
252 sky2_pci_write32(hw, PCI_CFG_REG_1, 0); 253 sky2_pci_write32(hw, PCI_CFG_REG_1, 0);
253 254
255 sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);
256
254 /* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */ 257 /* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */
255 reg = sky2_read32(hw, B2_GP_IO); 258 reg = sky2_read32(hw, B2_GP_IO);
256 reg |= GLB_GPIO_STAT_RACE_DIS; 259 reg |= GLB_GPIO_STAT_RACE_DIS;
@@ -731,7 +734,6 @@ static void sky2_wol_init(struct sky2_port *sky2)
731 unsigned port = sky2->port; 734 unsigned port = sky2->port;
732 enum flow_control save_mode; 735 enum flow_control save_mode;
733 u16 ctrl; 736 u16 ctrl;
734 u32 reg1;
735 737
736 /* Bring hardware out of reset */ 738 /* Bring hardware out of reset */
737 sky2_write16(hw, B0_CTST, CS_RST_CLR); 739 sky2_write16(hw, B0_CTST, CS_RST_CLR);
@@ -782,14 +784,11 @@ static void sky2_wol_init(struct sky2_port *sky2)
782 ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT; 784 ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
783 sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl); 785 sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
784 786
785 /* Turn on legacy PCI-Express PME mode */ 787 /* Disable PiG firmware */
786 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); 788 sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF);
787 reg1 |= PCI_Y2_PME_LEGACY;
788 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
789 789
790 /* block receiver */ 790 /* block receiver */
791 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); 791 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
792
793} 792}
794 793
795static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port) 794static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
@@ -800,29 +799,15 @@ static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
800 hw->chip_rev != CHIP_REV_YU_EX_A0) || 799 hw->chip_rev != CHIP_REV_YU_EX_A0) ||
801 hw->chip_id >= CHIP_ID_YUKON_FE_P) { 800 hw->chip_id >= CHIP_ID_YUKON_FE_P) {
802 /* Yukon-Extreme B0 and further Extreme devices */ 801 /* Yukon-Extreme B0 and further Extreme devices */
803 /* enable Store & Forward mode for TX */ 802 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
804 803 } else if (dev->mtu > ETH_DATA_LEN) {
805 if (dev->mtu <= ETH_DATA_LEN) 804 /* set Tx GMAC FIFO Almost Empty Threshold */
806 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 805 sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
807 TX_JUMBO_DIS | TX_STFW_ENA); 806 (ECU_JUMBO_WM << 16) | ECU_AE_THR);
808
809 else
810 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
811 TX_JUMBO_ENA| TX_STFW_ENA);
812 } else {
813 if (dev->mtu <= ETH_DATA_LEN)
814 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
815 else {
816 /* set Tx GMAC FIFO Almost Empty Threshold */
817 sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
818 (ECU_JUMBO_WM << 16) | ECU_AE_THR);
819
820 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
821 807
822 /* Can't do offload because of lack of store/forward */ 808 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
823 dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_ALL_CSUM); 809 } else
824 } 810 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
825 }
826} 811}
827 812
828static void sky2_mac_init(struct sky2_hw *hw, unsigned port) 813static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
@@ -1065,6 +1050,40 @@ static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
1065 return le; 1050 return le;
1066} 1051}
1067 1052
1053static unsigned sky2_get_rx_threshold(struct sky2_port* sky2)
1054{
1055 unsigned size;
1056
1057 /* Space needed for frame data + headers rounded up */
1058 size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
1059
1060 /* Stopping point for hardware truncation */
1061 return (size - 8) / sizeof(u32);
1062}
1063
1064static unsigned sky2_get_rx_data_size(struct sky2_port* sky2)
1065{
1066 struct rx_ring_info *re;
1067 unsigned size;
1068
1069 /* Space needed for frame data + headers rounded up */
1070 size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
1071
1072 sky2->rx_nfrags = size >> PAGE_SHIFT;
1073 BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));
1074
1075 /* Compute residue after pages */
1076 size -= sky2->rx_nfrags << PAGE_SHIFT;
1077
1078 /* Optimize to handle small packets and headers */
1079 if (size < copybreak)
1080 size = copybreak;
1081 if (size < ETH_HLEN)
1082 size = ETH_HLEN;
1083
1084 return size;
1085}
1086
1068/* Build description to hardware for one receive segment */ 1087/* Build description to hardware for one receive segment */
1069static void sky2_rx_add(struct sky2_port *sky2, u8 op, 1088static void sky2_rx_add(struct sky2_port *sky2, u8 op,
1070 dma_addr_t map, unsigned len) 1089 dma_addr_t map, unsigned len)
@@ -1103,18 +1122,39 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
1103 int i; 1122 int i;
1104 1123
1105 re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE); 1124 re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1106 if (unlikely(pci_dma_mapping_error(pdev, re->data_addr))) 1125 if (pci_dma_mapping_error(pdev, re->data_addr))
1107 return -EIO; 1126 goto mapping_error;
1108 1127
1109 pci_unmap_len_set(re, data_size, size); 1128 pci_unmap_len_set(re, data_size, size);
1110 1129
1111 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 1130 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1112 re->frag_addr[i] = pci_map_page(pdev, 1131 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1113 skb_shinfo(skb)->frags[i].page, 1132
1114 skb_shinfo(skb)->frags[i].page_offset, 1133 re->frag_addr[i] = pci_map_page(pdev, frag->page,
1115 skb_shinfo(skb)->frags[i].size, 1134 frag->page_offset,
1135 frag->size,
1116 PCI_DMA_FROMDEVICE); 1136 PCI_DMA_FROMDEVICE);
1137
1138 if (pci_dma_mapping_error(pdev, re->frag_addr[i]))
1139 goto map_page_error;
1140 }
1117 return 0; 1141 return 0;
1142
1143map_page_error:
1144 while (--i >= 0) {
1145 pci_unmap_page(pdev, re->frag_addr[i],
1146 skb_shinfo(skb)->frags[i].size,
1147 PCI_DMA_FROMDEVICE);
1148 }
1149
1150 pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size),
1151 PCI_DMA_FROMDEVICE);
1152
1153mapping_error:
1154 if (net_ratelimit())
1155 dev_warn(&pdev->dev, "%s: rx mapping error\n",
1156 skb->dev->name);
1157 return -EIO;
1118} 1158}
1119 1159
1120static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re) 1160static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
@@ -1173,8 +1213,7 @@ static void sky2_rx_stop(struct sky2_port *sky2)
1173 == sky2_read8(hw, RB_ADDR(rxq, Q_RL))) 1213 == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
1174 goto stopped; 1214 goto stopped;
1175 1215
1176 printk(KERN_WARNING PFX "%s: receiver stop failed\n", 1216 netdev_warn(sky2->netdev, "receiver stop failed\n");
1177 sky2->netdev->name);
1178stopped: 1217stopped:
1179 sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST); 1218 sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
1180 1219
@@ -1324,8 +1363,32 @@ static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
1324 sky2_put_idx(sky2->hw, rxq, sky2->rx_put); 1363 sky2_put_idx(sky2->hw, rxq, sky2->rx_put);
1325} 1364}
1326 1365
1366static int sky2_alloc_rx_skbs(struct sky2_port *sky2)
1367{
1368 struct sky2_hw *hw = sky2->hw;
1369 unsigned i;
1370
1371 sky2->rx_data_size = sky2_get_rx_data_size(sky2);
1372
1373 /* Fill Rx ring */
1374 for (i = 0; i < sky2->rx_pending; i++) {
1375 struct rx_ring_info *re = sky2->rx_ring + i;
1376
1377 re->skb = sky2_rx_alloc(sky2);
1378 if (!re->skb)
1379 return -ENOMEM;
1380
1381 if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) {
1382 dev_kfree_skb(re->skb);
1383 re->skb = NULL;
1384 return -ENOMEM;
1385 }
1386 }
1387 return 0;
1388}
1389
1327/* 1390/*
1328 * Allocate and setup receiver buffer pool. 1391 * Setup receiver buffer pool.
1329 * Normal case this ends up creating one list element for skb 1392 * Normal case this ends up creating one list element for skb
1330 * in the receive ring. Worst case if using large MTU and each 1393 * in the receive ring. Worst case if using large MTU and each
1331 * allocation falls on a different 64 bit region, that results 1394 * allocation falls on a different 64 bit region, that results
@@ -1333,12 +1396,12 @@ static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
1333 * One element is used for checksum enable/disable, and one 1396 * One element is used for checksum enable/disable, and one
1334 * extra to avoid wrap. 1397 * extra to avoid wrap.
1335 */ 1398 */
1336static int sky2_rx_start(struct sky2_port *sky2) 1399static void sky2_rx_start(struct sky2_port *sky2)
1337{ 1400{
1338 struct sky2_hw *hw = sky2->hw; 1401 struct sky2_hw *hw = sky2->hw;
1339 struct rx_ring_info *re; 1402 struct rx_ring_info *re;
1340 unsigned rxq = rxqaddr[sky2->port]; 1403 unsigned rxq = rxqaddr[sky2->port];
1341 unsigned i, size, thresh; 1404 unsigned i, thresh;
1342 1405
1343 sky2->rx_put = sky2->rx_next = 0; 1406 sky2->rx_put = sky2->rx_next = 0;
1344 sky2_qset(hw, rxq); 1407 sky2_qset(hw, rxq);
@@ -1359,40 +1422,9 @@ static int sky2_rx_start(struct sky2_port *sky2)
1359 if (!(hw->flags & SKY2_HW_NEW_LE)) 1422 if (!(hw->flags & SKY2_HW_NEW_LE))
1360 rx_set_checksum(sky2); 1423 rx_set_checksum(sky2);
1361 1424
1362 /* Space needed for frame data + headers rounded up */ 1425 /* submit Rx ring */
1363 size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
1364
1365 /* Stopping point for hardware truncation */
1366 thresh = (size - 8) / sizeof(u32);
1367
1368 sky2->rx_nfrags = size >> PAGE_SHIFT;
1369 BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));
1370
1371 /* Compute residue after pages */
1372 size -= sky2->rx_nfrags << PAGE_SHIFT;
1373
1374 /* Optimize to handle small packets and headers */
1375 if (size < copybreak)
1376 size = copybreak;
1377 if (size < ETH_HLEN)
1378 size = ETH_HLEN;
1379
1380 sky2->rx_data_size = size;
1381
1382 /* Fill Rx ring */
1383 for (i = 0; i < sky2->rx_pending; i++) { 1426 for (i = 0; i < sky2->rx_pending; i++) {
1384 re = sky2->rx_ring + i; 1427 re = sky2->rx_ring + i;
1385
1386 re->skb = sky2_rx_alloc(sky2);
1387 if (!re->skb)
1388 goto nomem;
1389
1390 if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) {
1391 dev_kfree_skb(re->skb);
1392 re->skb = NULL;
1393 goto nomem;
1394 }
1395
1396 sky2_rx_submit(sky2, re); 1428 sky2_rx_submit(sky2, re);
1397 } 1429 }
1398 1430
@@ -1402,6 +1434,7 @@ static int sky2_rx_start(struct sky2_port *sky2)
1402 * the register is limited to 9 bits, so if you do frames > 2052 1434 * the register is limited to 9 bits, so if you do frames > 2052
1403 * you better get the MTU right! 1435 * you better get the MTU right!
1404 */ 1436 */
1437 thresh = sky2_get_rx_threshold(sky2);
1405 if (thresh > 0x1ff) 1438 if (thresh > 0x1ff)
1406 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF); 1439 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
1407 else { 1440 else {
@@ -1433,13 +1466,6 @@ static int sky2_rx_start(struct sky2_port *sky2)
1433 sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST), 1466 sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST),
1434 TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN); 1467 TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN);
1435 } 1468 }
1436
1437
1438
1439 return 0;
1440nomem:
1441 sky2_rx_clean(sky2);
1442 return -ENOMEM;
1443} 1469}
1444 1470
1445static int sky2_alloc_buffers(struct sky2_port *sky2) 1471static int sky2_alloc_buffers(struct sky2_port *sky2)
@@ -1470,7 +1496,7 @@ static int sky2_alloc_buffers(struct sky2_port *sky2)
1470 if (!sky2->rx_ring) 1496 if (!sky2->rx_ring)
1471 goto nomem; 1497 goto nomem;
1472 1498
1473 return 0; 1499 return sky2_alloc_rx_skbs(sky2);
1474nomem: 1500nomem:
1475 return -ENOMEM; 1501 return -ENOMEM;
1476} 1502}
@@ -1479,6 +1505,8 @@ static void sky2_free_buffers(struct sky2_port *sky2)
1479{ 1505{
1480 struct sky2_hw *hw = sky2->hw; 1506 struct sky2_hw *hw = sky2->hw;
1481 1507
1508 sky2_rx_clean(sky2);
1509
1482 if (sky2->rx_le) { 1510 if (sky2->rx_le) {
1483 pci_free_consistent(hw->pdev, RX_LE_BYTES, 1511 pci_free_consistent(hw->pdev, RX_LE_BYTES,
1484 sky2->rx_le, sky2->rx_le_map); 1512 sky2->rx_le, sky2->rx_le_map);
@@ -1497,16 +1525,16 @@ static void sky2_free_buffers(struct sky2_port *sky2)
1497 sky2->rx_ring = NULL; 1525 sky2->rx_ring = NULL;
1498} 1526}
1499 1527
1500/* Bring up network interface. */ 1528static void sky2_hw_up(struct sky2_port *sky2)
1501static int sky2_up(struct net_device *dev)
1502{ 1529{
1503 struct sky2_port *sky2 = netdev_priv(dev);
1504 struct sky2_hw *hw = sky2->hw; 1530 struct sky2_hw *hw = sky2->hw;
1505 unsigned port = sky2->port; 1531 unsigned port = sky2->port;
1506 u32 imask, ramsize; 1532 u32 ramsize;
1507 int cap, err; 1533 int cap;
1508 struct net_device *otherdev = hw->dev[sky2->port^1]; 1534 struct net_device *otherdev = hw->dev[sky2->port^1];
1509 1535
1536 tx_init(sky2);
1537
1510 /* 1538 /*
1511 * On dual port PCI-X card, there is an problem where status 1539 * On dual port PCI-X card, there is an problem where status
1512 * can be received out of order due to split transactions 1540 * can be received out of order due to split transactions
@@ -1518,16 +1546,7 @@ static int sky2_up(struct net_device *dev)
1518 cmd = sky2_pci_read16(hw, cap + PCI_X_CMD); 1546 cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
1519 cmd &= ~PCI_X_CMD_MAX_SPLIT; 1547 cmd &= ~PCI_X_CMD_MAX_SPLIT;
1520 sky2_pci_write16(hw, cap + PCI_X_CMD, cmd); 1548 sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
1521 1549 }
1522 }
1523
1524 netif_carrier_off(dev);
1525
1526 err = sky2_alloc_buffers(sky2);
1527 if (err)
1528 goto err_out;
1529
1530 tx_init(sky2);
1531 1550
1532 sky2_mac_init(hw, port); 1551 sky2_mac_init(hw, port);
1533 1552
@@ -1536,7 +1555,7 @@ static int sky2_up(struct net_device *dev)
1536 if (ramsize > 0) { 1555 if (ramsize > 0) {
1537 u32 rxspace; 1556 u32 rxspace;
1538 1557
1539 pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize); 1558 netdev_dbg(sky2->netdev, "ram buffer %dK\n", ramsize);
1540 if (ramsize < 16) 1559 if (ramsize < 16)
1541 rxspace = ramsize / 2; 1560 rxspace = ramsize / 2;
1542 else 1561 else
@@ -1568,18 +1587,33 @@ static int sky2_up(struct net_device *dev)
1568 sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL); 1587 sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
1569#endif 1588#endif
1570 1589
1571 err = sky2_rx_start(sky2); 1590 sky2_rx_start(sky2);
1591}
1592
1593/* Bring up network interface. */
1594static int sky2_up(struct net_device *dev)
1595{
1596 struct sky2_port *sky2 = netdev_priv(dev);
1597 struct sky2_hw *hw = sky2->hw;
1598 unsigned port = sky2->port;
1599 u32 imask;
1600 int err;
1601
1602 netif_carrier_off(dev);
1603
1604 err = sky2_alloc_buffers(sky2);
1572 if (err) 1605 if (err)
1573 goto err_out; 1606 goto err_out;
1574 1607
1608 sky2_hw_up(sky2);
1609
1575 /* Enable interrupts from phy/mac for port */ 1610 /* Enable interrupts from phy/mac for port */
1576 imask = sky2_read32(hw, B0_IMSK); 1611 imask = sky2_read32(hw, B0_IMSK);
1577 imask |= portirq_msk[port]; 1612 imask |= portirq_msk[port];
1578 sky2_write32(hw, B0_IMSK, imask); 1613 sky2_write32(hw, B0_IMSK, imask);
1579 sky2_read32(hw, B0_IMSK); 1614 sky2_read32(hw, B0_IMSK);
1580 1615
1581 if (netif_msg_ifup(sky2)) 1616 netif_info(sky2, ifup, dev, "enabling interface\n");
1582 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
1583 1617
1584 return 0; 1618 return 0;
1585 1619
@@ -1662,9 +1696,8 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
1662 goto mapping_error; 1696 goto mapping_error;
1663 1697
1664 slot = sky2->tx_prod; 1698 slot = sky2->tx_prod;
1665 if (unlikely(netif_msg_tx_queued(sky2))) 1699 netif_printk(sky2, tx_queued, KERN_DEBUG, dev,
1666 printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n", 1700 "tx queued, slot %u, len %d\n", slot, skb->len);
1667 dev->name, slot, skb->len);
1668 1701
1669 /* Send high bits if needed */ 1702 /* Send high bits if needed */
1670 upper = upper_32_bits(mapping); 1703 upper = upper_32_bits(mapping);
@@ -1829,9 +1862,8 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1829 sky2_tx_unmap(sky2->hw->pdev, re); 1862 sky2_tx_unmap(sky2->hw->pdev, re);
1830 1863
1831 if (skb) { 1864 if (skb) {
1832 if (unlikely(netif_msg_tx_done(sky2))) 1865 netif_printk(sky2, tx_done, KERN_DEBUG, dev,
1833 printk(KERN_DEBUG "%s: tx done %u\n", 1866 "tx done %u\n", idx);
1834 dev->name, idx);
1835 1867
1836 dev->stats.tx_packets++; 1868 dev->stats.tx_packets++;
1837 dev->stats.tx_bytes += skb->len; 1869 dev->stats.tx_bytes += skb->len;
@@ -1845,10 +1877,6 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1845 1877
1846 sky2->tx_cons = idx; 1878 sky2->tx_cons = idx;
1847 smp_mb(); 1879 smp_mb();
1848
1849 /* Wake unless it's detached, and called e.g. from sky2_down() */
1850 if (tx_avail(sky2) > MAX_SKB_TX_LE + 4 && netif_device_present(dev))
1851 netif_wake_queue(dev);
1852} 1880}
1853 1881
1854static void sky2_tx_reset(struct sky2_hw *hw, unsigned port) 1882static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
@@ -1873,21 +1901,11 @@ static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
1873 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); 1901 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
1874} 1902}
1875 1903
1876/* Network shutdown */ 1904static void sky2_hw_down(struct sky2_port *sky2)
1877static int sky2_down(struct net_device *dev)
1878{ 1905{
1879 struct sky2_port *sky2 = netdev_priv(dev);
1880 struct sky2_hw *hw = sky2->hw; 1906 struct sky2_hw *hw = sky2->hw;
1881 unsigned port = sky2->port; 1907 unsigned port = sky2->port;
1882 u16 ctrl; 1908 u16 ctrl;
1883 u32 imask;
1884
1885 /* Never really got started! */
1886 if (!sky2->tx_le)
1887 return 0;
1888
1889 if (netif_msg_ifdown(sky2))
1890 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
1891 1909
1892 /* Force flow control off */ 1910 /* Force flow control off */
1893 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); 1911 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
@@ -1920,15 +1938,6 @@ static int sky2_down(struct net_device *dev)
1920 1938
1921 sky2_rx_stop(sky2); 1939 sky2_rx_stop(sky2);
1922 1940
1923 /* Disable port IRQ */
1924 imask = sky2_read32(hw, B0_IMSK);
1925 imask &= ~portirq_msk[port];
1926 sky2_write32(hw, B0_IMSK, imask);
1927 sky2_read32(hw, B0_IMSK);
1928
1929 synchronize_irq(hw->pdev->irq);
1930 napi_synchronize(&hw->napi);
1931
1932 spin_lock_bh(&sky2->phy_lock); 1941 spin_lock_bh(&sky2->phy_lock);
1933 sky2_phy_power_down(hw, port); 1942 sky2_phy_power_down(hw, port);
1934 spin_unlock_bh(&sky2->phy_lock); 1943 spin_unlock_bh(&sky2->phy_lock);
@@ -1937,8 +1946,29 @@ static int sky2_down(struct net_device *dev)
1937 1946
1938 /* Free any pending frames stuck in HW queue */ 1947 /* Free any pending frames stuck in HW queue */
1939 sky2_tx_complete(sky2, sky2->tx_prod); 1948 sky2_tx_complete(sky2, sky2->tx_prod);
1949}
1940 1950
1941 sky2_rx_clean(sky2); 1951/* Network shutdown */
1952static int sky2_down(struct net_device *dev)
1953{
1954 struct sky2_port *sky2 = netdev_priv(dev);
1955 struct sky2_hw *hw = sky2->hw;
1956
1957 /* Never really got started! */
1958 if (!sky2->tx_le)
1959 return 0;
1960
1961 netif_info(sky2, ifdown, dev, "disabling interface\n");
1962
1963 /* Disable port IRQ */
1964 sky2_write32(hw, B0_IMSK,
1965 sky2_read32(hw, B0_IMSK) & ~portirq_msk[sky2->port]);
1966 sky2_read32(hw, B0_IMSK);
1967
1968 synchronize_irq(hw->pdev->irq);
1969 napi_synchronize(&hw->napi);
1970
1971 sky2_hw_down(sky2);
1942 1972
1943 sky2_free_buffers(sky2); 1973 sky2_free_buffers(sky2);
1944 1974
@@ -1994,12 +2024,11 @@ static void sky2_link_up(struct sky2_port *sky2)
1994 sky2_write8(hw, SK_REG(port, LNK_LED_REG), 2024 sky2_write8(hw, SK_REG(port, LNK_LED_REG),
1995 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF); 2025 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
1996 2026
1997 if (netif_msg_link(sky2)) 2027 netif_info(sky2, link, sky2->netdev,
1998 printk(KERN_INFO PFX 2028 "Link is up at %d Mbps, %s duplex, flow control %s\n",
1999 "%s: Link is up at %d Mbps, %s duplex, flow control %s\n", 2029 sky2->speed,
2000 sky2->netdev->name, sky2->speed, 2030 sky2->duplex == DUPLEX_FULL ? "full" : "half",
2001 sky2->duplex == DUPLEX_FULL ? "full" : "half", 2031 fc_name[sky2->flow_status]);
2002 fc_name[sky2->flow_status]);
2003} 2032}
2004 2033
2005static void sky2_link_down(struct sky2_port *sky2) 2034static void sky2_link_down(struct sky2_port *sky2)
@@ -2019,8 +2048,7 @@ static void sky2_link_down(struct sky2_port *sky2)
2019 /* Turn off link LED */ 2048 /* Turn off link LED */
2020 sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF); 2049 sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
2021 2050
2022 if (netif_msg_link(sky2)) 2051 netif_info(sky2, link, sky2->netdev, "Link is down\n");
2023 printk(KERN_INFO PFX "%s: Link is down.\n", sky2->netdev->name);
2024 2052
2025 sky2_phy_init(hw, port); 2053 sky2_phy_init(hw, port);
2026} 2054}
@@ -2042,13 +2070,12 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
2042 advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV); 2070 advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
2043 lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP); 2071 lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
2044 if (lpa & PHY_M_AN_RF) { 2072 if (lpa & PHY_M_AN_RF) {
2045 printk(KERN_ERR PFX "%s: remote fault", sky2->netdev->name); 2073 netdev_err(sky2->netdev, "remote fault\n");
2046 return -1; 2074 return -1;
2047 } 2075 }
2048 2076
2049 if (!(aux & PHY_M_PS_SPDUP_RES)) { 2077 if (!(aux & PHY_M_PS_SPDUP_RES)) {
2050 printk(KERN_ERR PFX "%s: speed/duplex mismatch", 2078 netdev_err(sky2->netdev, "speed/duplex mismatch\n");
2051 sky2->netdev->name);
2052 return -1; 2079 return -1;
2053 } 2080 }
2054 2081
@@ -2110,9 +2137,8 @@ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
2110 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT); 2137 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
2111 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT); 2138 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
2112 2139
2113 if (netif_msg_intr(sky2)) 2140 netif_info(sky2, intr, sky2->netdev, "phy interrupt status 0x%x 0x%x\n",
2114 printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n", 2141 istatus, phystat);
2115 sky2->netdev->name, istatus, phystat);
2116 2142
2117 if (istatus & PHY_M_IS_AN_COMPL) { 2143 if (istatus & PHY_M_IS_AN_COMPL) {
2118 if (sky2_autoneg_done(sky2, phystat) == 0) 2144 if (sky2_autoneg_done(sky2, phystat) == 0)
@@ -2166,13 +2192,12 @@ static void sky2_tx_timeout(struct net_device *dev)
2166 struct sky2_port *sky2 = netdev_priv(dev); 2192 struct sky2_port *sky2 = netdev_priv(dev);
2167 struct sky2_hw *hw = sky2->hw; 2193 struct sky2_hw *hw = sky2->hw;
2168 2194
2169 if (netif_msg_timer(sky2)) 2195 netif_err(sky2, timer, dev, "tx timeout\n");
2170 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
2171 2196
2172 printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n", 2197 netdev_printk(KERN_DEBUG, dev, "transmit ring %u .. %u report=%u done=%u\n",
2173 dev->name, sky2->tx_cons, sky2->tx_prod, 2198 sky2->tx_cons, sky2->tx_prod,
2174 sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX), 2199 sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
2175 sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE))); 2200 sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE)));
2176 2201
2177 /* can't restart safely under softirq */ 2202 /* can't restart safely under softirq */
2178 schedule_work(&hw->restart_work); 2203 schedule_work(&hw->restart_work);
@@ -2187,14 +2212,20 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
2187 u16 ctl, mode; 2212 u16 ctl, mode;
2188 u32 imask; 2213 u32 imask;
2189 2214
2215 /* MTU size outside the spec */
2190 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) 2216 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
2191 return -EINVAL; 2217 return -EINVAL;
2192 2218
2219 /* MTU > 1500 on yukon FE and FE+ not allowed */
2193 if (new_mtu > ETH_DATA_LEN && 2220 if (new_mtu > ETH_DATA_LEN &&
2194 (hw->chip_id == CHIP_ID_YUKON_FE || 2221 (hw->chip_id == CHIP_ID_YUKON_FE ||
2195 hw->chip_id == CHIP_ID_YUKON_FE_P)) 2222 hw->chip_id == CHIP_ID_YUKON_FE_P))
2196 return -EINVAL; 2223 return -EINVAL;
2197 2224
2225 /* TSO, etc on Yukon Ultra and MTU > 1500 not supported */
2226 if (new_mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U)
2227 dev->features &= ~(NETIF_F_TSO|NETIF_F_SG|NETIF_F_ALL_CSUM);
2228
2198 if (!netif_running(dev)) { 2229 if (!netif_running(dev)) {
2199 dev->mtu = new_mtu; 2230 dev->mtu = new_mtu;
2200 return 0; 2231 return 0;
@@ -2229,7 +2260,11 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
2229 2260
2230 sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD); 2261 sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD);
2231 2262
2232 err = sky2_rx_start(sky2); 2263 err = sky2_alloc_rx_skbs(sky2);
2264 if (!err)
2265 sky2_rx_start(sky2);
2266 else
2267 sky2_rx_clean(sky2);
2233 sky2_write32(hw, B0_IMSK, imask); 2268 sky2_write32(hw, B0_IMSK, imask);
2234 2269
2235 sky2_read32(hw, B0_Y2_SP_LISR); 2270 sky2_read32(hw, B0_Y2_SP_LISR);
@@ -2306,30 +2341,32 @@ static struct sk_buff *receive_new(struct sky2_port *sky2,
2306 struct rx_ring_info *re, 2341 struct rx_ring_info *re,
2307 unsigned int length) 2342 unsigned int length)
2308{ 2343{
2309 struct sk_buff *skb, *nskb; 2344 struct sk_buff *skb;
2345 struct rx_ring_info nre;
2310 unsigned hdr_space = sky2->rx_data_size; 2346 unsigned hdr_space = sky2->rx_data_size;
2311 2347
2312 /* Don't be tricky about reusing pages (yet) */ 2348 nre.skb = sky2_rx_alloc(sky2);
2313 nskb = sky2_rx_alloc(sky2); 2349 if (unlikely(!nre.skb))
2314 if (unlikely(!nskb)) 2350 goto nobuf;
2315 return NULL; 2351
2352 if (sky2_rx_map_skb(sky2->hw->pdev, &nre, hdr_space))
2353 goto nomap;
2316 2354
2317 skb = re->skb; 2355 skb = re->skb;
2318 sky2_rx_unmap_skb(sky2->hw->pdev, re); 2356 sky2_rx_unmap_skb(sky2->hw->pdev, re);
2319
2320 prefetch(skb->data); 2357 prefetch(skb->data);
2321 re->skb = nskb; 2358 *re = nre;
2322 if (sky2_rx_map_skb(sky2->hw->pdev, re, hdr_space)) {
2323 dev_kfree_skb(nskb);
2324 re->skb = skb;
2325 return NULL;
2326 }
2327 2359
2328 if (skb_shinfo(skb)->nr_frags) 2360 if (skb_shinfo(skb)->nr_frags)
2329 skb_put_frags(skb, hdr_space, length); 2361 skb_put_frags(skb, hdr_space, length);
2330 else 2362 else
2331 skb_put(skb, length); 2363 skb_put(skb, length);
2332 return skb; 2364 return skb;
2365
2366nomap:
2367 dev_kfree_skb(nre.skb);
2368nobuf:
2369 return NULL;
2333} 2370}
2334 2371
2335/* 2372/*
@@ -2350,9 +2387,9 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
2350 count -= VLAN_HLEN; 2387 count -= VLAN_HLEN;
2351#endif 2388#endif
2352 2389
2353 if (unlikely(netif_msg_rx_status(sky2))) 2390 netif_printk(sky2, rx_status, KERN_DEBUG, dev,
2354 printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n", 2391 "rx slot %u status 0x%x len %d\n",
2355 dev->name, sky2->rx_next, status, length); 2392 sky2->rx_next, status, length);
2356 2393
2357 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; 2394 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
2358 prefetch(sky2->rx_ring + sky2->rx_next); 2395 prefetch(sky2->rx_ring + sky2->rx_next);
@@ -2381,6 +2418,9 @@ okay:
2381 skb = receive_copy(sky2, re, length); 2418 skb = receive_copy(sky2, re, length);
2382 else 2419 else
2383 skb = receive_new(sky2, re, length); 2420 skb = receive_new(sky2, re, length);
2421
2422 dev->stats.rx_dropped += (skb == NULL);
2423
2384resubmit: 2424resubmit:
2385 sky2_rx_submit(sky2, re); 2425 sky2_rx_submit(sky2, re);
2386 2426
@@ -2390,9 +2430,10 @@ len_error:
2390 /* Truncation of overlength packets 2430 /* Truncation of overlength packets
2391 causes PHY length to not match MAC length */ 2431 causes PHY length to not match MAC length */
2392 ++dev->stats.rx_length_errors; 2432 ++dev->stats.rx_length_errors;
2393 if (netif_msg_rx_err(sky2) && net_ratelimit()) 2433 if (net_ratelimit())
2394 pr_info(PFX "%s: rx length error: status %#x length %d\n", 2434 netif_info(sky2, rx_err, dev,
2395 dev->name, status, length); 2435 "rx length error: status %#x length %d\n",
2436 status, length);
2396 goto resubmit; 2437 goto resubmit;
2397 2438
2398error: 2439error:
@@ -2402,9 +2443,9 @@ error:
2402 goto resubmit; 2443 goto resubmit;
2403 } 2444 }
2404 2445
2405 if (netif_msg_rx_err(sky2) && net_ratelimit()) 2446 if (net_ratelimit())
2406 printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n", 2447 netif_info(sky2, rx_err, dev,
2407 dev->name, status, length); 2448 "rx error, status 0x%x length %d\n", status, length);
2408 2449
2409 if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE)) 2450 if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
2410 dev->stats.rx_length_errors++; 2451 dev->stats.rx_length_errors++;
@@ -2421,8 +2462,13 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
2421{ 2462{
2422 struct sky2_port *sky2 = netdev_priv(dev); 2463 struct sky2_port *sky2 = netdev_priv(dev);
2423 2464
2424 if (netif_running(dev)) 2465 if (netif_running(dev)) {
2425 sky2_tx_complete(sky2, last); 2466 sky2_tx_complete(sky2, last);
2467
2468 /* Wake unless it's detached, and called e.g. from sky2_down() */
2469 if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
2470 netif_wake_queue(dev);
2471 }
2426} 2472}
2427 2473
2428static inline void sky2_skb_rx(const struct sky2_port *sky2, 2474static inline void sky2_skb_rx(const struct sky2_port *sky2,
@@ -2458,6 +2504,32 @@ static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
2458 } 2504 }
2459} 2505}
2460 2506
2507static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
2508{
2509 /* If this happens then driver assuming wrong format for chip type */
2510 BUG_ON(sky2->hw->flags & SKY2_HW_NEW_LE);
2511
2512 /* Both checksum counters are programmed to start at
2513 * the same offset, so unless there is a problem they
2514 * should match. This failure is an early indication that
2515 * hardware receive checksumming won't work.
2516 */
2517 if (likely((u16)(status >> 16) == (u16)status)) {
2518 struct sk_buff *skb = sky2->rx_ring[sky2->rx_next].skb;
2519 skb->ip_summed = CHECKSUM_COMPLETE;
2520 skb->csum = le16_to_cpu(status);
2521 } else {
2522 dev_notice(&sky2->hw->pdev->dev,
2523 "%s: receive checksum problem (status = %#x)\n",
2524 sky2->netdev->name, status);
2525
2526 /* Disable checksum offload */
2527 sky2->flags &= ~SKY2_FLAG_RX_CHECKSUM;
2528 sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
2529 BMU_DIS_RX_CHKSUM);
2530 }
2531}
2532
2461/* Process status response ring */ 2533/* Process status response ring */
2462static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) 2534static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2463{ 2535{
@@ -2492,11 +2564,10 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2492 case OP_RXSTAT: 2564 case OP_RXSTAT:
2493 total_packets[port]++; 2565 total_packets[port]++;
2494 total_bytes[port] += length; 2566 total_bytes[port] += length;
2567
2495 skb = sky2_receive(dev, length, status); 2568 skb = sky2_receive(dev, length, status);
2496 if (unlikely(!skb)) { 2569 if (!skb)
2497 dev->stats.rx_dropped++;
2498 break; 2570 break;
2499 }
2500 2571
2501 /* This chip reports checksum status differently */ 2572 /* This chip reports checksum status differently */
2502 if (hw->flags & SKY2_HW_NEW_LE) { 2573 if (hw->flags & SKY2_HW_NEW_LE) {
@@ -2527,37 +2598,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2527 /* fall through */ 2598 /* fall through */
2528#endif 2599#endif
2529 case OP_RXCHKS: 2600 case OP_RXCHKS:
2530 if (!(sky2->flags & SKY2_FLAG_RX_CHECKSUM)) 2601 if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM))
2531 break; 2602 sky2_rx_checksum(sky2, status);
2532
2533 /* If this happens then driver assuming wrong format */
2534 if (unlikely(hw->flags & SKY2_HW_NEW_LE)) {
2535 if (net_ratelimit())
2536 printk(KERN_NOTICE "%s: unexpected"
2537 " checksum status\n",
2538 dev->name);
2539 break;
2540 }
2541
2542 /* Both checksum counters are programmed to start at
2543 * the same offset, so unless there is a problem they
2544 * should match. This failure is an early indication that
2545 * hardware receive checksumming won't work.
2546 */
2547 if (likely(status >> 16 == (status & 0xffff))) {
2548 skb = sky2->rx_ring[sky2->rx_next].skb;
2549 skb->ip_summed = CHECKSUM_COMPLETE;
2550 skb->csum = le16_to_cpu(status);
2551 } else {
2552 printk(KERN_NOTICE PFX "%s: hardware receive "
2553 "checksum problem (status = %#x)\n",
2554 dev->name, status);
2555 sky2->flags &= ~SKY2_FLAG_RX_CHECKSUM;
2556
2557 sky2_write32(sky2->hw,
2558 Q_ADDR(rxqaddr[port], Q_CSR),
2559 BMU_DIS_RX_CHKSUM);
2560 }
2561 break; 2603 break;
2562 2604
2563 case OP_TXINDEXLE: 2605 case OP_TXINDEXLE:
@@ -2571,8 +2613,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2571 2613
2572 default: 2614 default:
2573 if (net_ratelimit()) 2615 if (net_ratelimit())
2574 printk(KERN_WARNING PFX 2616 pr_warning("unknown status opcode 0x%x\n", opcode);
2575 "unknown status opcode 0x%x\n", opcode);
2576 } 2617 }
2577 } while (hw->st_idx != idx); 2618 } while (hw->st_idx != idx);
2578 2619
@@ -2591,41 +2632,37 @@ static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
2591 struct net_device *dev = hw->dev[port]; 2632 struct net_device *dev = hw->dev[port];
2592 2633
2593 if (net_ratelimit()) 2634 if (net_ratelimit())
2594 printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n", 2635 netdev_info(dev, "hw error interrupt status 0x%x\n", status);
2595 dev->name, status);
2596 2636
2597 if (status & Y2_IS_PAR_RD1) { 2637 if (status & Y2_IS_PAR_RD1) {
2598 if (net_ratelimit()) 2638 if (net_ratelimit())
2599 printk(KERN_ERR PFX "%s: ram data read parity error\n", 2639 netdev_err(dev, "ram data read parity error\n");
2600 dev->name);
2601 /* Clear IRQ */ 2640 /* Clear IRQ */
2602 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR); 2641 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
2603 } 2642 }
2604 2643
2605 if (status & Y2_IS_PAR_WR1) { 2644 if (status & Y2_IS_PAR_WR1) {
2606 if (net_ratelimit()) 2645 if (net_ratelimit())
2607 printk(KERN_ERR PFX "%s: ram data write parity error\n", 2646 netdev_err(dev, "ram data write parity error\n");
2608 dev->name);
2609 2647
2610 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR); 2648 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
2611 } 2649 }
2612 2650
2613 if (status & Y2_IS_PAR_MAC1) { 2651 if (status & Y2_IS_PAR_MAC1) {
2614 if (net_ratelimit()) 2652 if (net_ratelimit())
2615 printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name); 2653 netdev_err(dev, "MAC parity error\n");
2616 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE); 2654 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
2617 } 2655 }
2618 2656
2619 if (status & Y2_IS_PAR_RX1) { 2657 if (status & Y2_IS_PAR_RX1) {
2620 if (net_ratelimit()) 2658 if (net_ratelimit())
2621 printk(KERN_ERR PFX "%s: RX parity error\n", dev->name); 2659 netdev_err(dev, "RX parity error\n");
2622 sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR); 2660 sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
2623 } 2661 }
2624 2662
2625 if (status & Y2_IS_TCP_TXA1) { 2663 if (status & Y2_IS_TCP_TXA1) {
2626 if (net_ratelimit()) 2664 if (net_ratelimit())
2627 printk(KERN_ERR PFX "%s: TCP segmentation error\n", 2665 netdev_err(dev, "TCP segmentation error\n");
2628 dev->name);
2629 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP); 2666 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
2630 } 2667 }
2631} 2668}
@@ -2683,9 +2720,7 @@ static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
2683 struct sky2_port *sky2 = netdev_priv(dev); 2720 struct sky2_port *sky2 = netdev_priv(dev);
2684 u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC)); 2721 u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
2685 2722
2686 if (netif_msg_intr(sky2)) 2723 netif_info(sky2, intr, dev, "mac interrupt status 0x%x\n", status);
2687 printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n",
2688 dev->name, status);
2689 2724
2690 if (status & GM_IS_RX_CO_OV) 2725 if (status & GM_IS_RX_CO_OV)
2691 gma_read16(hw, port, GM_RX_IRQ_SRC); 2726 gma_read16(hw, port, GM_RX_IRQ_SRC);
@@ -2710,8 +2745,7 @@ static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q)
2710 struct net_device *dev = hw->dev[port]; 2745 struct net_device *dev = hw->dev[port];
2711 u16 idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX)); 2746 u16 idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
2712 2747
2713 dev_err(&hw->pdev->dev, PFX 2748 dev_err(&hw->pdev->dev, "%s: descriptor error q=%#x get=%u put=%u\n",
2714 "%s: descriptor error q=%#x get=%u put=%u\n",
2715 dev->name, (unsigned) q, (unsigned) idx, 2749 dev->name, (unsigned) q, (unsigned) idx,
2716 (unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX))); 2750 (unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)));
2717 2751
@@ -2736,9 +2770,10 @@ static int sky2_rx_hung(struct net_device *dev)
2736 /* Check if the PCI RX hang */ 2770 /* Check if the PCI RX hang */
2737 (fifo_rp == sky2->check.fifo_rp && 2771 (fifo_rp == sky2->check.fifo_rp &&
2738 fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) { 2772 fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) {
2739 printk(KERN_DEBUG PFX "%s: hung mac %d:%d fifo %d (%d:%d)\n", 2773 netdev_printk(KERN_DEBUG, dev,
2740 dev->name, mac_lev, mac_rp, fifo_lev, fifo_rp, 2774 "hung mac %d:%d fifo %d (%d:%d)\n",
2741 sky2_read8(hw, Q_ADDR(rxq, Q_WP))); 2775 mac_lev, mac_rp, fifo_lev,
2776 fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
2742 return 1; 2777 return 1;
2743 } else { 2778 } else {
2744 sky2->check.last = dev->last_rx; 2779 sky2->check.last = dev->last_rx;
@@ -2769,8 +2804,7 @@ static void sky2_watchdog(unsigned long arg)
2769 /* For chips with Rx FIFO, check if stuck */ 2804 /* For chips with Rx FIFO, check if stuck */
2770 if ((hw->flags & SKY2_HW_RAM_BUFFER) && 2805 if ((hw->flags & SKY2_HW_RAM_BUFFER) &&
2771 sky2_rx_hung(dev)) { 2806 sky2_rx_hung(dev)) {
2772 pr_info(PFX "%s: receiver hang detected\n", 2807 netdev_info(dev, "receiver hang detected\n");
2773 dev->name);
2774 schedule_work(&hw->restart_work); 2808 schedule_work(&hw->restart_work);
2775 return; 2809 return;
2776 } 2810 }
@@ -3010,11 +3044,20 @@ static void sky2_reset(struct sky2_hw *hw)
3010 u32 hwe_mask = Y2_HWE_ALL_MASK; 3044 u32 hwe_mask = Y2_HWE_ALL_MASK;
3011 3045
3012 /* disable ASF */ 3046 /* disable ASF */
3013 if (hw->chip_id == CHIP_ID_YUKON_EX) { 3047 if (hw->chip_id == CHIP_ID_YUKON_EX
3048 || hw->chip_id == CHIP_ID_YUKON_SUPR) {
3049 sky2_write32(hw, CPU_WDOG, 0);
3014 status = sky2_read16(hw, HCU_CCSR); 3050 status = sky2_read16(hw, HCU_CCSR);
3015 status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE | 3051 status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
3016 HCU_CCSR_UC_STATE_MSK); 3052 HCU_CCSR_UC_STATE_MSK);
3053 /*
3054 * CPU clock divider shouldn't be used because
3055 * - ASF firmware may malfunction
3056 * - Yukon-Supreme: Parallel FLASH doesn't support divided clocks
3057 */
3058 status &= ~HCU_CCSR_CPU_CLK_DIVIDE_MSK;
3017 sky2_write16(hw, HCU_CCSR, status); 3059 sky2_write16(hw, HCU_CCSR, status);
3060 sky2_write32(hw, CPU_WDOG, 0);
3018 } else 3061 } else
3019 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); 3062 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
3020 sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE); 3063 sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
@@ -3097,7 +3140,7 @@ static void sky2_reset(struct sky2_hw *hw)
3097 /* check if PSMv2 was running before */ 3140 /* check if PSMv2 was running before */
3098 reg = sky2_pci_read16(hw, PSM_CONFIG_REG3); 3141 reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
3099 if (reg & PCI_EXP_LNKCTL_ASPMC) { 3142 if (reg & PCI_EXP_LNKCTL_ASPMC) {
3100 int cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); 3143 cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3101 /* restore the PCIe Link Control register */ 3144 /* restore the PCIe Link Control register */
3102 sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg); 3145 sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg);
3103 } 3146 }
@@ -3188,7 +3231,9 @@ static void sky2_reset(struct sky2_hw *hw)
3188static void sky2_detach(struct net_device *dev) 3231static void sky2_detach(struct net_device *dev)
3189{ 3232{
3190 if (netif_running(dev)) { 3233 if (netif_running(dev)) {
3234 netif_tx_lock(dev);
3191 netif_device_detach(dev); /* stop txq */ 3235 netif_device_detach(dev); /* stop txq */
3236 netif_tx_unlock(dev);
3192 sky2_down(dev); 3237 sky2_down(dev);
3193 } 3238 }
3194} 3239}
@@ -3201,8 +3246,7 @@ static int sky2_reattach(struct net_device *dev)
3201 if (netif_running(dev)) { 3246 if (netif_running(dev)) {
3202 err = sky2_up(dev); 3247 err = sky2_up(dev);
3203 if (err) { 3248 if (err) {
3204 printk(KERN_INFO PFX "%s: could not restart %d\n", 3249 netdev_info(dev, "could not restart %d\n", err);
3205 dev->name, err);
3206 dev_close(dev); 3250 dev_close(dev);
3207 } else { 3251 } else {
3208 netif_device_attach(dev); 3252 netif_device_attach(dev);
@@ -3216,48 +3260,53 @@ static int sky2_reattach(struct net_device *dev)
3216static void sky2_restart(struct work_struct *work) 3260static void sky2_restart(struct work_struct *work)
3217{ 3261{
3218 struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work); 3262 struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
3263 u32 imask;
3219 int i; 3264 int i;
3220 3265
3221 rtnl_lock(); 3266 rtnl_lock();
3222 for (i = 0; i < hw->ports; i++)
3223 sky2_detach(hw->dev[i]);
3224 3267
3225 napi_disable(&hw->napi); 3268 napi_disable(&hw->napi);
3269 synchronize_irq(hw->pdev->irq);
3270 imask = sky2_read32(hw, B0_IMSK);
3226 sky2_write32(hw, B0_IMSK, 0); 3271 sky2_write32(hw, B0_IMSK, 0);
3227 sky2_reset(hw);
3228 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
3229 napi_enable(&hw->napi);
3230 3272
3231 for (i = 0; i < hw->ports; i++) 3273 for (i = 0; i < hw->ports; i++) {
3232 sky2_reattach(hw->dev[i]); 3274 struct net_device *dev = hw->dev[i];
3275 struct sky2_port *sky2 = netdev_priv(dev);
3233 3276
3234 rtnl_unlock(); 3277 if (!netif_running(dev))
3235} 3278 continue;
3236 3279
3237static inline u8 sky2_wol_supported(const struct sky2_hw *hw) 3280 netif_carrier_off(dev);
3238{ 3281 netif_tx_disable(dev);
3239 return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0; 3282 sky2_hw_down(sky2);
3240} 3283 }
3241 3284
3242static void sky2_hw_set_wol(struct sky2_hw *hw) 3285 sky2_reset(hw);
3243{
3244 int wol = 0;
3245 int i;
3246 3286
3247 for (i = 0; i < hw->ports; i++) { 3287 for (i = 0; i < hw->ports; i++) {
3248 struct net_device *dev = hw->dev[i]; 3288 struct net_device *dev = hw->dev[i];
3249 struct sky2_port *sky2 = netdev_priv(dev); 3289 struct sky2_port *sky2 = netdev_priv(dev);
3250 3290
3251 if (sky2->wol) 3291 if (!netif_running(dev))
3252 wol = 1; 3292 continue;
3293
3294 sky2_hw_up(sky2);
3295 netif_wake_queue(dev);
3253 } 3296 }
3254 3297
3255 if (hw->chip_id == CHIP_ID_YUKON_EC_U || 3298 sky2_write32(hw, B0_IMSK, imask);
3256 hw->chip_id == CHIP_ID_YUKON_EX || 3299 sky2_read32(hw, B0_IMSK);
3257 hw->chip_id == CHIP_ID_YUKON_FE_P) 3300
3258 sky2_write32(hw, B0_CTST, wol ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF); 3301 sky2_read32(hw, B0_Y2_SP_LISR);
3302 napi_enable(&hw->napi);
3259 3303
3260 device_set_wakeup_enable(&hw->pdev->dev, wol); 3304 rtnl_unlock();
3305}
3306
3307static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
3308{
3309 return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
3261} 3310}
3262 3311
3263static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 3312static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -3278,11 +3327,6 @@ static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3278 return -EOPNOTSUPP; 3327 return -EOPNOTSUPP;
3279 3328
3280 sky2->wol = wol->wolopts; 3329 sky2->wol = wol->wolopts;
3281
3282 sky2_hw_set_wol(hw);
3283
3284 if (!netif_running(dev))
3285 sky2_wol_init(sky2);
3286 return 0; 3330 return 0;
3287} 3331}
3288 3332
@@ -3577,7 +3621,7 @@ static void sky2_set_multicast(struct net_device *dev)
3577 struct sky2_port *sky2 = netdev_priv(dev); 3621 struct sky2_port *sky2 = netdev_priv(dev);
3578 struct sky2_hw *hw = sky2->hw; 3622 struct sky2_hw *hw = sky2->hw;
3579 unsigned port = sky2->port; 3623 unsigned port = sky2->port;
3580 struct dev_mc_list *list = dev->mc_list; 3624 struct dev_mc_list *list;
3581 u16 reg; 3625 u16 reg;
3582 u8 filter[8]; 3626 u8 filter[8];
3583 int rx_pause; 3627 int rx_pause;
@@ -3593,16 +3637,15 @@ static void sky2_set_multicast(struct net_device *dev)
3593 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 3637 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
3594 else if (dev->flags & IFF_ALLMULTI) 3638 else if (dev->flags & IFF_ALLMULTI)
3595 memset(filter, 0xff, sizeof(filter)); 3639 memset(filter, 0xff, sizeof(filter));
3596 else if (dev->mc_count == 0 && !rx_pause) 3640 else if (netdev_mc_empty(dev) && !rx_pause)
3597 reg &= ~GM_RXCR_MCF_ENA; 3641 reg &= ~GM_RXCR_MCF_ENA;
3598 else { 3642 else {
3599 int i;
3600 reg |= GM_RXCR_MCF_ENA; 3643 reg |= GM_RXCR_MCF_ENA;
3601 3644
3602 if (rx_pause) 3645 if (rx_pause)
3603 sky2_add_filter(filter, pause_mc_addr); 3646 sky2_add_filter(filter, pause_mc_addr);
3604 3647
3605 for (i = 0; list && i < dev->mc_count; i++, list = list->next) 3648 netdev_for_each_mc_addr(list, dev)
3606 sky2_add_filter(filter, list->dmi_addr); 3649 sky2_add_filter(filter, list->dmi_addr);
3607 } 3650 }
3608 3651
@@ -3864,6 +3907,50 @@ static int sky2_get_regs_len(struct net_device *dev)
3864 return 0x4000; 3907 return 0x4000;
3865} 3908}
3866 3909
3910static int sky2_reg_access_ok(struct sky2_hw *hw, unsigned int b)
3911{
3912 /* This complicated switch statement is to make sure and
3913 * only access regions that are unreserved.
3914 * Some blocks are only valid on dual port cards.
3915 */
3916 switch (b) {
3917 /* second port */
3918 case 5: /* Tx Arbiter 2 */
3919 case 9: /* RX2 */
3920 case 14 ... 15: /* TX2 */
3921 case 17: case 19: /* Ram Buffer 2 */
3922 case 22 ... 23: /* Tx Ram Buffer 2 */
3923 case 25: /* Rx MAC Fifo 1 */
3924 case 27: /* Tx MAC Fifo 2 */
3925 case 31: /* GPHY 2 */
3926 case 40 ... 47: /* Pattern Ram 2 */
3927 case 52: case 54: /* TCP Segmentation 2 */
3928 case 112 ... 116: /* GMAC 2 */
3929 return hw->ports > 1;
3930
3931 case 0: /* Control */
3932 case 2: /* Mac address */
3933 case 4: /* Tx Arbiter 1 */
3934 case 7: /* PCI express reg */
3935 case 8: /* RX1 */
3936 case 12 ... 13: /* TX1 */
3937 case 16: case 18:/* Rx Ram Buffer 1 */
3938 case 20 ... 21: /* Tx Ram Buffer 1 */
3939 case 24: /* Rx MAC Fifo 1 */
3940 case 26: /* Tx MAC Fifo 1 */
3941 case 28 ... 29: /* Descriptor and status unit */
3942 case 30: /* GPHY 1*/
3943 case 32 ... 39: /* Pattern Ram 1 */
3944 case 48: case 50: /* TCP Segmentation 1 */
3945 case 56 ... 60: /* PCI space */
3946 case 80 ... 84: /* GMAC 1 */
3947 return 1;
3948
3949 default:
3950 return 0;
3951 }
3952}
3953
3867/* 3954/*
3868 * Returns copy of control register region 3955 * Returns copy of control register region
3869 * Note: ethtool_get_regs always provides full size (16k) buffer 3956 * Note: ethtool_get_regs always provides full size (16k) buffer
@@ -3878,55 +3965,13 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
3878 regs->version = 1; 3965 regs->version = 1;
3879 3966
3880 for (b = 0; b < 128; b++) { 3967 for (b = 0; b < 128; b++) {
3881 /* This complicated switch statement is to make sure and 3968 /* skip poisonous diagnostic ram region in block 3 */
3882 * only access regions that are unreserved. 3969 if (b == 3)
3883 * Some blocks are only valid on dual port cards.
3884 * and block 3 has some special diagnostic registers that
3885 * are poison.
3886 */
3887 switch (b) {
3888 case 3:
3889 /* skip diagnostic ram region */
3890 memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10); 3970 memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10);
3891 break; 3971 else if (sky2_reg_access_ok(sky2->hw, b))
3892
3893 /* dual port cards only */
3894 case 5: /* Tx Arbiter 2 */
3895 case 9: /* RX2 */
3896 case 14 ... 15: /* TX2 */
3897 case 17: case 19: /* Ram Buffer 2 */
3898 case 22 ... 23: /* Tx Ram Buffer 2 */
3899 case 25: /* Rx MAC Fifo 1 */
3900 case 27: /* Tx MAC Fifo 2 */
3901 case 31: /* GPHY 2 */
3902 case 40 ... 47: /* Pattern Ram 2 */
3903 case 52: case 54: /* TCP Segmentation 2 */
3904 case 112 ... 116: /* GMAC 2 */
3905 if (sky2->hw->ports == 1)
3906 goto reserved;
3907 /* fall through */
3908 case 0: /* Control */
3909 case 2: /* Mac address */
3910 case 4: /* Tx Arbiter 1 */
3911 case 7: /* PCI express reg */
3912 case 8: /* RX1 */
3913 case 12 ... 13: /* TX1 */
3914 case 16: case 18:/* Rx Ram Buffer 1 */
3915 case 20 ... 21: /* Tx Ram Buffer 1 */
3916 case 24: /* Rx MAC Fifo 1 */
3917 case 26: /* Tx MAC Fifo 1 */
3918 case 28 ... 29: /* Descriptor and status unit */
3919 case 30: /* GPHY 1*/
3920 case 32 ... 39: /* Pattern Ram 1 */
3921 case 48: case 50: /* TCP Segmentation 1 */
3922 case 56 ... 60: /* PCI space */
3923 case 80 ... 84: /* GMAC 1 */
3924 memcpy_fromio(p, io, 128); 3972 memcpy_fromio(p, io, 128);
3925 break; 3973 else
3926 default:
3927reserved:
3928 memset(p, 0, 128); 3974 memset(p, 0, 128);
3929 }
3930 3975
3931 p += 128; 3976 p += 128;
3932 io += 128; 3977 io += 128;
@@ -3978,7 +4023,7 @@ static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
3978 while ( (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) { 4023 while ( (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) {
3979 /* Can take up to 10.6 ms for write */ 4024 /* Can take up to 10.6 ms for write */
3980 if (time_after(jiffies, start + HZ/4)) { 4025 if (time_after(jiffies, start + HZ/4)) {
3981 dev_err(&hw->pdev->dev, PFX "VPD cycle timed out"); 4026 dev_err(&hw->pdev->dev, "VPD cycle timed out\n");
3982 return -ETIMEDOUT; 4027 return -ETIMEDOUT;
3983 } 4028 }
3984 mdelay(1); 4029 mdelay(1);
@@ -4312,8 +4357,7 @@ static int sky2_device_event(struct notifier_block *unused,
4312 4357
4313 case NETDEV_GOING_DOWN: 4358 case NETDEV_GOING_DOWN:
4314 if (sky2->debugfs) { 4359 if (sky2->debugfs) {
4315 printk(KERN_DEBUG PFX "%s: remove debugfs\n", 4360 netdev_printk(KERN_DEBUG, dev, "remove debugfs\n");
4316 dev->name);
4317 debugfs_remove(sky2->debugfs); 4361 debugfs_remove(sky2->debugfs);
4318 sky2->debugfs = NULL; 4362 sky2->debugfs = NULL;
4319 } 4363 }
@@ -4466,9 +4510,7 @@ static void __devinit sky2_show_addr(struct net_device *dev)
4466{ 4510{
4467 const struct sky2_port *sky2 = netdev_priv(dev); 4511 const struct sky2_port *sky2 = netdev_priv(dev);
4468 4512
4469 if (netif_msg_probe(sky2)) 4513 netif_info(sky2, probe, dev, "addr %pM\n", dev->dev_addr);
4470 printk(KERN_INFO PFX "%s: addr %pM\n",
4471 dev->name, dev->dev_addr);
4472} 4514}
4473 4515
4474/* Handle software interrupt used during MSI test */ 4516/* Handle software interrupt used during MSI test */
@@ -4774,7 +4816,6 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
4774 pci_set_drvdata(pdev, NULL); 4816 pci_set_drvdata(pdev, NULL);
4775} 4817}
4776 4818
4777#ifdef CONFIG_PM
4778static int sky2_suspend(struct pci_dev *pdev, pm_message_t state) 4819static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
4779{ 4820{
4780 struct sky2_hw *hw = pci_get_drvdata(pdev); 4821 struct sky2_hw *hw = pci_get_drvdata(pdev);
@@ -4799,6 +4840,8 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
4799 wol |= sky2->wol; 4840 wol |= sky2->wol;
4800 } 4841 }
4801 4842
4843 device_set_wakeup_enable(&pdev->dev, wol != 0);
4844
4802 sky2_write32(hw, B0_IMSK, 0); 4845 sky2_write32(hw, B0_IMSK, 0);
4803 napi_disable(&hw->napi); 4846 napi_disable(&hw->napi);
4804 sky2_power_aux(hw); 4847 sky2_power_aux(hw);
@@ -4811,6 +4854,7 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
4811 return 0; 4854 return 0;
4812} 4855}
4813 4856
4857#ifdef CONFIG_PM
4814static int sky2_resume(struct pci_dev *pdev) 4858static int sky2_resume(struct pci_dev *pdev)
4815{ 4859{
4816 struct sky2_hw *hw = pci_get_drvdata(pdev); 4860 struct sky2_hw *hw = pci_get_drvdata(pdev);
@@ -4830,10 +4874,11 @@ static int sky2_resume(struct pci_dev *pdev)
4830 pci_enable_wake(pdev, PCI_D0, 0); 4874 pci_enable_wake(pdev, PCI_D0, 0);
4831 4875
4832 /* Re-enable all clocks */ 4876 /* Re-enable all clocks */
4833 if (hw->chip_id == CHIP_ID_YUKON_EX || 4877 err = pci_write_config_dword(pdev, PCI_DEV_REG3, 0);
4834 hw->chip_id == CHIP_ID_YUKON_EC_U || 4878 if (err) {
4835 hw->chip_id == CHIP_ID_YUKON_FE_P) 4879 dev_err(&pdev->dev, "PCI write config failed\n");
4836 sky2_pci_write32(hw, PCI_DEV_REG3, 0); 4880 goto out;
4881 }
4837 4882
4838 sky2_reset(hw); 4883 sky2_reset(hw);
4839 sky2_write32(hw, B0_IMSK, Y2_IS_BASE); 4884 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
@@ -4859,34 +4904,7 @@ out:
4859 4904
4860static void sky2_shutdown(struct pci_dev *pdev) 4905static void sky2_shutdown(struct pci_dev *pdev)
4861{ 4906{
4862 struct sky2_hw *hw = pci_get_drvdata(pdev); 4907 sky2_suspend(pdev, PMSG_SUSPEND);
4863 int i, wol = 0;
4864
4865 if (!hw)
4866 return;
4867
4868 rtnl_lock();
4869 del_timer_sync(&hw->watchdog_timer);
4870
4871 for (i = 0; i < hw->ports; i++) {
4872 struct net_device *dev = hw->dev[i];
4873 struct sky2_port *sky2 = netdev_priv(dev);
4874
4875 if (sky2->wol) {
4876 wol = 1;
4877 sky2_wol_init(sky2);
4878 }
4879 }
4880
4881 if (wol)
4882 sky2_power_aux(hw);
4883 rtnl_unlock();
4884
4885 pci_enable_wake(pdev, PCI_D3hot, wol);
4886 pci_enable_wake(pdev, PCI_D3cold, wol);
4887
4888 pci_disable_device(pdev);
4889 pci_set_power_state(pdev, PCI_D3hot);
4890} 4908}
4891 4909
4892static struct pci_driver sky2_driver = { 4910static struct pci_driver sky2_driver = {
@@ -4903,7 +4921,7 @@ static struct pci_driver sky2_driver = {
4903 4921
4904static int __init sky2_init_module(void) 4922static int __init sky2_init_module(void)
4905{ 4923{
4906 pr_info(PFX "driver version " DRV_VERSION "\n"); 4924 pr_info("driver version " DRV_VERSION "\n");
4907 4925
4908 sky2_debug_init(); 4926 sky2_debug_init();
4909 return pci_register_driver(&sky2_driver); 4927 return pci_register_driver(&sky2_driver);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 365d79c7d834..a5e182dd9819 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -1895,14 +1895,14 @@ enum {
1895 1895
1896/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */ 1896/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */
1897enum { 1897enum {
1898 TX_STFW_DIS = 1<<31,/* Disable Store & Forward (Yukon-EC Ultra) */ 1898 TX_STFW_DIS = 1<<31,/* Disable Store & Forward */
1899 TX_STFW_ENA = 1<<30,/* Enable Store & Forward (Yukon-EC Ultra) */ 1899 TX_STFW_ENA = 1<<30,/* Enable Store & Forward */
1900 1900
1901 TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */ 1901 TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */
1902 TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */ 1902 TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */
1903 1903
1904 TX_JUMBO_ENA = 1<<23,/* PCI Jumbo Mode enable (Yukon-EC Ultra) */ 1904 TX_PCI_JUM_ENA = 1<<23,/* PCI Jumbo Mode enable */
1905 TX_JUMBO_DIS = 1<<22,/* PCI Jumbo Mode enable (Yukon-EC Ultra) */ 1905 TX_PCI_JUM_DIS = 1<<22,/* PCI Jumbo Mode enable */
1906 1906
1907 GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */ 1907 GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */
1908 GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */ 1908 GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */
@@ -2156,7 +2156,7 @@ struct tx_ring_info {
2156 struct sk_buff *skb; 2156 struct sk_buff *skb;
2157 unsigned long flags; 2157 unsigned long flags;
2158#define TX_MAP_SINGLE 0x0001 2158#define TX_MAP_SINGLE 0x0001
2159#define TX_MAP_PAGE 000002 2159#define TX_MAP_PAGE 0x0002
2160 DECLARE_PCI_UNMAP_ADDR(mapaddr); 2160 DECLARE_PCI_UNMAP_ADDR(mapaddr);
2161 DECLARE_PCI_UNMAP_LEN(maplen); 2161 DECLARE_PCI_UNMAP_LEN(maplen);
2162}; 2162};
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 44ebbaa7457b..9871a2b61f86 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -1323,7 +1323,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
1323 * I don't need to zero the multicast table, because the flag is 1323 * I don't need to zero the multicast table, because the flag is
1324 * checked before the table is 1324 * checked before the table is
1325 */ 1325 */
1326 else if (dev->flags & IFF_ALLMULTI || dev->mc_count > 16) { 1326 else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
1327 DBG(SMC_DEBUG_MISC, "%s: RCR_ALMUL\n", dev->name); 1327 DBG(SMC_DEBUG_MISC, "%s: RCR_ALMUL\n", dev->name);
1328 mcr |= MAC_CR_MCPAS_; 1328 mcr |= MAC_CR_MCPAS_;
1329 } 1329 }
@@ -1340,8 +1340,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
1340 * the number of the 32 bit register, while the low 5 bits are the bit 1340 * the number of the 32 bit register, while the low 5 bits are the bit
1341 * within that register. 1341 * within that register.
1342 */ 1342 */
1343 else if (dev->mc_count) { 1343 else if (!netdev_mc_empty(dev)) {
1344 int i;
1345 struct dev_mc_list *cur_addr; 1344 struct dev_mc_list *cur_addr;
1346 1345
1347 /* Set the Hash perfec mode */ 1346 /* Set the Hash perfec mode */
@@ -1350,8 +1349,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
1350 /* start with a table of all zeros: reject all */ 1349 /* start with a table of all zeros: reject all */
1351 memset(multicast_table, 0, sizeof(multicast_table)); 1350 memset(multicast_table, 0, sizeof(multicast_table));
1352 1351
1353 cur_addr = dev->mc_list; 1352 netdev_for_each_mc_addr(cur_addr, dev) {
1354 for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
1355 u32 position; 1353 u32 position;
1356 1354
1357 /* do we have a pointer here? */ 1355 /* do we have a pointer here? */
@@ -2017,10 +2015,8 @@ static int __devinit smc911x_probe(struct net_device *dev)
2017 "set using ifconfig\n", dev->name); 2015 "set using ifconfig\n", dev->name);
2018 } else { 2016 } else {
2019 /* Print the Ethernet address */ 2017 /* Print the Ethernet address */
2020 printk("%s: Ethernet addr: ", dev->name); 2018 printk("%s: Ethernet addr: %pM\n",
2021 for (i = 0; i < 5; i++) 2019 dev->name, dev->dev_addr);
2022 printk("%2.2x:", dev->dev_addr[i]);
2023 printk("%2.2x\n", dev->dev_addr[5]);
2024 } 2020 }
2025 2021
2026 if (lp->phy_type == 0) { 2022 if (lp->phy_type == 0) {
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index 8371b82323ac..f9a960e7fc1f 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -434,18 +434,18 @@ static void smc_shutdown( int ioaddr )
434*/ 434*/
435 435
436 436
437static void smc_setmulticast( int ioaddr, int count, struct dev_mc_list * addrs ) { 437static void smc_setmulticast(int ioaddr, struct net_device *dev)
438{
438 int i; 439 int i;
439 unsigned char multicast_table[ 8 ]; 440 unsigned char multicast_table[ 8 ];
440 struct dev_mc_list * cur_addr; 441 struct dev_mc_list *cur_addr;
441 /* table for flipping the order of 3 bits */ 442 /* table for flipping the order of 3 bits */
442 unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 }; 443 unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 };
443 444
444 /* start with a table of all zeros: reject all */ 445 /* start with a table of all zeros: reject all */
445 memset( multicast_table, 0, sizeof( multicast_table ) ); 446 memset( multicast_table, 0, sizeof( multicast_table ) );
446 447
447 cur_addr = addrs; 448 netdev_for_each_mc_addr(cur_addr, dev) {
448 for ( i = 0; i < count ; i ++, cur_addr = cur_addr->next ) {
449 int position; 449 int position;
450 450
451 /* do we have a pointer here? */ 451 /* do we have a pointer here? */
@@ -1542,7 +1542,7 @@ static void smc_set_multicast_list(struct net_device *dev)
1542 /* We just get all multicast packets even if we only want them 1542 /* We just get all multicast packets even if we only want them
1543 . from one source. This will be changed at some future 1543 . from one source. This will be changed at some future
1544 . point. */ 1544 . point. */
1545 else if (dev->mc_count ) { 1545 else if (!netdev_mc_empty(dev)) {
1546 /* support hardware multicasting */ 1546 /* support hardware multicasting */
1547 1547
1548 /* be sure I get rid of flags I might have set */ 1548 /* be sure I get rid of flags I might have set */
@@ -1550,7 +1550,7 @@ static void smc_set_multicast_list(struct net_device *dev)
1550 ioaddr + RCR ); 1550 ioaddr + RCR );
1551 /* NOTE: this has to set the bank, so make sure it is the 1551 /* NOTE: this has to set the bank, so make sure it is the
1552 last thing called. The bank is set to zero at the top */ 1552 last thing called. The bank is set to zero at the top */
1553 smc_setmulticast( ioaddr, dev->mc_count, dev->mc_list ); 1553 smc_setmulticast(ioaddr, dev);
1554 } 1554 }
1555 else { 1555 else {
1556 outw( inw( ioaddr + RCR ) & ~(RCR_PROMISC | RCR_ALMUL), 1556 outw( inw( ioaddr + RCR ) & ~(RCR_PROMISC | RCR_ALMUL),
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index ea4fae79d6ec..fc1b5a1a3583 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1395,7 +1395,7 @@ static void smc_set_multicast_list(struct net_device *dev)
1395 * I don't need to zero the multicast table, because the flag is 1395 * I don't need to zero the multicast table, because the flag is
1396 * checked before the table is 1396 * checked before the table is
1397 */ 1397 */
1398 else if (dev->flags & IFF_ALLMULTI || dev->mc_count > 16) { 1398 else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
1399 DBG(2, "%s: RCR_ALMUL\n", dev->name); 1399 DBG(2, "%s: RCR_ALMUL\n", dev->name);
1400 lp->rcr_cur_mode |= RCR_ALMUL; 1400 lp->rcr_cur_mode |= RCR_ALMUL;
1401 } 1401 }
@@ -1412,8 +1412,7 @@ static void smc_set_multicast_list(struct net_device *dev)
1412 * the number of the 8 bit register, while the low 3 bits are the bit 1412 * the number of the 8 bit register, while the low 3 bits are the bit
1413 * within that register. 1413 * within that register.
1414 */ 1414 */
1415 else if (dev->mc_count) { 1415 else if (!netdev_mc_empty(dev)) {
1416 int i;
1417 struct dev_mc_list *cur_addr; 1416 struct dev_mc_list *cur_addr;
1418 1417
1419 /* table for flipping the order of 3 bits */ 1418 /* table for flipping the order of 3 bits */
@@ -1422,13 +1421,9 @@ static void smc_set_multicast_list(struct net_device *dev)
1422 /* start with a table of all zeros: reject all */ 1421 /* start with a table of all zeros: reject all */
1423 memset(multicast_table, 0, sizeof(multicast_table)); 1422 memset(multicast_table, 0, sizeof(multicast_table));
1424 1423
1425 cur_addr = dev->mc_list; 1424 netdev_for_each_mc_addr(cur_addr, dev) {
1426 for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
1427 int position; 1425 int position;
1428 1426
1429 /* do we have a pointer here? */
1430 if (!cur_addr)
1431 break;
1432 /* make sure this is a multicast address - 1427 /* make sure this is a multicast address -
1433 shouldn't this be a given if we have it here ? */ 1428 shouldn't this be a given if we have it here ? */
1434 if (!(*cur_addr->dmi_addr & 1)) 1429 if (!(*cur_addr->dmi_addr & 1))
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 494cd91ea39c..4fd1d8b38788 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -770,29 +770,25 @@ static int smsc911x_mii_probe(struct net_device *dev)
770{ 770{
771 struct smsc911x_data *pdata = netdev_priv(dev); 771 struct smsc911x_data *pdata = netdev_priv(dev);
772 struct phy_device *phydev = NULL; 772 struct phy_device *phydev = NULL;
773 int phy_addr; 773 int ret;
774 774
775 /* find the first phy */ 775 /* find the first phy */
776 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { 776 phydev = phy_find_first(pdata->mii_bus);
777 if (pdata->mii_bus->phy_map[phy_addr]) {
778 phydev = pdata->mii_bus->phy_map[phy_addr];
779 SMSC_TRACE(PROBE, "PHY %d: addr %d, phy_id 0x%08X",
780 phy_addr, phydev->addr, phydev->phy_id);
781 break;
782 }
783 }
784
785 if (!phydev) { 777 if (!phydev) {
786 pr_err("%s: no PHY found\n", dev->name); 778 pr_err("%s: no PHY found\n", dev->name);
787 return -ENODEV; 779 return -ENODEV;
788 } 780 }
789 781
790 phydev = phy_connect(dev, dev_name(&phydev->dev), 782 SMSC_TRACE(PROBE, "PHY %d: addr %d, phy_id 0x%08X",
791 &smsc911x_phy_adjust_link, 0, pdata->config.phy_interface); 783 phy_addr, phydev->addr, phydev->phy_id);
792 784
793 if (IS_ERR(phydev)) { 785 ret = phy_connect_direct(dev, phydev,
786 &smsc911x_phy_adjust_link, 0,
787 pdata->config.phy_interface);
788
789 if (ret) {
794 pr_err("%s: Could not attach to PHY\n", dev->name); 790 pr_err("%s: Could not attach to PHY\n", dev->name);
795 return PTR_ERR(phydev); 791 return ret;
796 } 792 }
797 793
798 pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", 794 pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
@@ -1383,33 +1379,24 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
1383 pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_HPFILT_); 1379 pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_HPFILT_);
1384 pdata->hashhi = 0; 1380 pdata->hashhi = 0;
1385 pdata->hashlo = 0; 1381 pdata->hashlo = 0;
1386 } else if (dev->mc_count > 0) { 1382 } else if (!netdev_mc_empty(dev)) {
1387 /* Enabling specific multicast addresses */ 1383 /* Enabling specific multicast addresses */
1388 unsigned int hash_high = 0; 1384 unsigned int hash_high = 0;
1389 unsigned int hash_low = 0; 1385 unsigned int hash_low = 0;
1390 unsigned int count = 0; 1386 struct dev_mc_list *mc_list;
1391 struct dev_mc_list *mc_list = dev->mc_list;
1392 1387
1393 pdata->set_bits_mask = MAC_CR_HPFILT_; 1388 pdata->set_bits_mask = MAC_CR_HPFILT_;
1394 pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_); 1389 pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_);
1395 1390
1396 while (mc_list) { 1391 netdev_for_each_mc_addr(mc_list, dev) {
1397 count++; 1392 unsigned int bitnum = smsc911x_hash(mc_list->dmi_addr);
1398 if ((mc_list->dmi_addrlen) == ETH_ALEN) { 1393 unsigned int mask = 0x01 << (bitnum & 0x1F);
1399 unsigned int bitnum = 1394
1400 smsc911x_hash(mc_list->dmi_addr); 1395 if (bitnum & 0x20)
1401 unsigned int mask = 0x01 << (bitnum & 0x1F); 1396 hash_high |= mask;
1402 if (bitnum & 0x20) 1397 else
1403 hash_high |= mask; 1398 hash_low |= mask;
1404 else
1405 hash_low |= mask;
1406 } else {
1407 SMSC_WARNING(DRV, "dmi_addrlen != 6");
1408 }
1409 mc_list = mc_list->next;
1410 } 1399 }
1411 if (count != (unsigned int)dev->mc_count)
1412 SMSC_WARNING(DRV, "mc_count != dev->mc_count");
1413 1400
1414 pdata->hashhi = hash_high; 1401 pdata->hashhi = hash_high;
1415 pdata->hashlo = hash_low; 1402 pdata->hashlo = hash_low;
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index 12f0f5d74e3c..30110a11d737 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -80,7 +80,7 @@ struct smsc9420_pdata {
80 int last_carrier; 80 int last_carrier;
81}; 81};
82 82
83static const struct pci_device_id smsc9420_id_table[] = { 83static DEFINE_PCI_DEVICE_TABLE(smsc9420_id_table) = {
84 { PCI_VENDOR_ID_9420, PCI_DEVICE_ID_9420, PCI_ANY_ID, PCI_ANY_ID, }, 84 { PCI_VENDOR_ID_9420, PCI_DEVICE_ID_9420, PCI_ANY_ID, PCI_ANY_ID, },
85 { 0, } 85 { 0, }
86}; 86};
@@ -1062,12 +1062,12 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
1062 mac_cr &= (~MAC_CR_PRMS_); 1062 mac_cr &= (~MAC_CR_PRMS_);
1063 mac_cr |= MAC_CR_MCPAS_; 1063 mac_cr |= MAC_CR_MCPAS_;
1064 mac_cr &= (~MAC_CR_HPFILT_); 1064 mac_cr &= (~MAC_CR_HPFILT_);
1065 } else if (dev->mc_count > 0) { 1065 } else if (!netdev_mc_empty(dev)) {
1066 struct dev_mc_list *mc_list = dev->mc_list; 1066 struct dev_mc_list *mc_list;
1067 u32 hash_lo = 0, hash_hi = 0; 1067 u32 hash_lo = 0, hash_hi = 0;
1068 1068
1069 smsc_dbg(HW, "Multicast filter enabled"); 1069 smsc_dbg(HW, "Multicast filter enabled");
1070 while (mc_list) { 1070 netdev_for_each_mc_addr(mc_list, dev) {
1071 u32 bit_num = smsc9420_hash(mc_list->dmi_addr); 1071 u32 bit_num = smsc9420_hash(mc_list->dmi_addr);
1072 u32 mask = 1 << (bit_num & 0x1F); 1072 u32 mask = 1 << (bit_num & 0x1F);
1073 1073
@@ -1076,7 +1076,6 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
1076 else 1076 else
1077 hash_lo |= mask; 1077 hash_lo |= mask;
1078 1078
1079 mc_list = mc_list->next;
1080 } 1079 }
1081 smsc9420_reg_write(pd, HASHH, hash_hi); 1080 smsc9420_reg_write(pd, HASHH, hash_hi);
1082 smsc9420_reg_write(pd, HASHL, hash_lo); 1081 smsc9420_reg_write(pd, HASHL, hash_lo);
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
index 9599ce77ef85..287c251075e5 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/sonic.c
@@ -531,7 +531,7 @@ static void sonic_multicast_list(struct net_device *dev)
531{ 531{
532 struct sonic_local *lp = netdev_priv(dev); 532 struct sonic_local *lp = netdev_priv(dev);
533 unsigned int rcr; 533 unsigned int rcr;
534 struct dev_mc_list *dmi = dev->mc_list; 534 struct dev_mc_list *dmi;
535 unsigned char *addr; 535 unsigned char *addr;
536 int i; 536 int i;
537 537
@@ -541,19 +541,22 @@ static void sonic_multicast_list(struct net_device *dev)
541 if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */ 541 if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
542 rcr |= SONIC_RCR_PRO; 542 rcr |= SONIC_RCR_PRO;
543 } else { 543 } else {
544 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 15)) { 544 if ((dev->flags & IFF_ALLMULTI) ||
545 (netdev_mc_count(dev) > 15)) {
545 rcr |= SONIC_RCR_AMC; 546 rcr |= SONIC_RCR_AMC;
546 } else { 547 } else {
547 if (sonic_debug > 2) 548 if (sonic_debug > 2)
548 printk("sonic_multicast_list: mc_count %d\n", dev->mc_count); 549 printk("sonic_multicast_list: mc_count %d\n",
550 netdev_mc_count(dev));
549 sonic_set_cam_enable(dev, 1); /* always enable our own address */ 551 sonic_set_cam_enable(dev, 1); /* always enable our own address */
550 for (i = 1; i <= dev->mc_count; i++) { 552 i = 1;
553 netdev_for_each_mc_addr(dmi, dev) {
551 addr = dmi->dmi_addr; 554 addr = dmi->dmi_addr;
552 dmi = dmi->next;
553 sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]); 555 sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
554 sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]); 556 sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
555 sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]); 557 sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
556 sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i)); 558 sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i));
559 i++;
557 } 560 }
558 SONIC_WRITE(SONIC_CDC, 16); 561 SONIC_WRITE(SONIC_CDC, 16);
559 /* issue Load CAM command */ 562 /* issue Load CAM command */
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 218524857bfc..2f8a8c32021e 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -72,7 +72,7 @@ MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
72 72
73char spider_net_driver_name[] = "spidernet"; 73char spider_net_driver_name[] = "spidernet";
74 74
75static struct pci_device_id spider_net_pci_tbl[] = { 75static DEFINE_PCI_DEVICE_TABLE(spider_net_pci_tbl) = {
76 { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET, 76 { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
77 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 77 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
78 { 0, } 78 { 0, }
@@ -646,7 +646,7 @@ spider_net_set_multi(struct net_device *netdev)
646 hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */ 646 hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
647 set_bit(0xfd, bitmask); 647 set_bit(0xfd, bitmask);
648 648
649 for (mc = netdev->mc_list; mc; mc = mc->next) { 649 netdev_for_each_mc_addr(mc, netdev) {
650 hash = spider_net_get_multicast_hash(netdev, mc->dmi_addr); 650 hash = spider_net_get_multicast_hash(netdev, mc->dmi_addr);
651 set_bit(hash, bitmask); 651 set_bit(hash, bitmask);
652 } 652 }
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index f9521136a869..6dfa69899019 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -301,7 +301,7 @@ enum chipset {
301 CH_6915 = 0, 301 CH_6915 = 0,
302}; 302};
303 303
304static struct pci_device_id starfire_pci_tbl[] = { 304static DEFINE_PCI_DEVICE_TABLE(starfire_pci_tbl) = {
305 { 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 }, 305 { 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
306 { 0, } 306 { 0, }
307}; 307};
@@ -1796,22 +1796,22 @@ static void set_rx_mode(struct net_device *dev)
1796 1796
1797 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1797 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1798 rx_mode |= AcceptAll; 1798 rx_mode |= AcceptAll;
1799 } else if ((dev->mc_count > multicast_filter_limit) || 1799 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1800 (dev->flags & IFF_ALLMULTI)) { 1800 (dev->flags & IFF_ALLMULTI)) {
1801 /* Too many to match, or accept all multicasts. */ 1801 /* Too many to match, or accept all multicasts. */
1802 rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter; 1802 rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
1803 } else if (dev->mc_count <= 14) { 1803 } else if (netdev_mc_count(dev) <= 14) {
1804 /* Use the 16 element perfect filter, skip first two entries. */ 1804 /* Use the 16 element perfect filter, skip first two entries. */
1805 void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16; 1805 void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1806 __be16 *eaddrs; 1806 __be16 *eaddrs;
1807 for (i = 2, mclist = dev->mc_list; mclist && i < dev->mc_count + 2; 1807 netdev_for_each_mc_addr(mclist, dev) {
1808 i++, mclist = mclist->next) {
1809 eaddrs = (__be16 *)mclist->dmi_addr; 1808 eaddrs = (__be16 *)mclist->dmi_addr;
1810 writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4; 1809 writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
1811 writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; 1810 writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1812 writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8; 1811 writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
1813 } 1812 }
1814 eaddrs = (__be16 *)dev->dev_addr; 1813 eaddrs = (__be16 *)dev->dev_addr;
1814 i = netdev_mc_count(dev) + 2;
1815 while (i++ < 16) { 1815 while (i++ < 16) {
1816 writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4; 1816 writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1817 writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; 1817 writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
@@ -1825,8 +1825,7 @@ static void set_rx_mode(struct net_device *dev)
1825 __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */ 1825 __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
1826 1826
1827 memset(mc_filter, 0, sizeof(mc_filter)); 1827 memset(mc_filter, 0, sizeof(mc_filter));
1828 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 1828 netdev_for_each_mc_addr(mclist, dev) {
1829 i++, mclist = mclist->next) {
1830 /* The chip uses the upper 9 CRC bits 1829 /* The chip uses the upper 9 CRC bits
1831 as index into the hash table */ 1830 as index into the hash table */
1832 int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23; 1831 int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
index 35eaa5251d7f..fb287649a305 100644
--- a/drivers/net/stmmac/Kconfig
+++ b/drivers/net/stmmac/Kconfig
@@ -4,8 +4,9 @@ config STMMAC_ETH
4 select PHYLIB 4 select PHYLIB
5 depends on NETDEVICES && CPU_SUBTYPE_ST40 5 depends on NETDEVICES && CPU_SUBTYPE_ST40
6 help 6 help
7 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet 7 This is the driver for the Ethernet IPs are built around a
8 controllers. ST Ethernet IPs are built around a Synopsys IP Core. 8 Synopsys IP Core and fully tested on the STMicroelectronics
9 platforms.
9 10
10if STMMAC_ETH 11if STMMAC_ETH
11 12
@@ -32,7 +33,8 @@ config STMMAC_TIMER
32 default n 33 default n
33 help 34 help
34 Use an external timer for mitigating the number of network 35 Use an external timer for mitigating the number of network
35 interrupts. 36 interrupts. Currently, for SH architectures, it is possible
37 to use the TMU channel 2 and the SH-RTC device.
36 38
37choice 39choice
38 prompt "Select Timer device" 40 prompt "Select Timer device"
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile
index b2d7a5564dfa..c776af15fe1a 100644
--- a/drivers/net/stmmac/Makefile
+++ b/drivers/net/stmmac/Makefile
@@ -1,4 +1,5 @@
1obj-$(CONFIG_STMMAC_ETH) += stmmac.o 1obj-$(CONFIG_STMMAC_ETH) += stmmac.o
2stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o 2stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
3stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \ 3stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
4 mac100.o gmac.o $(stmmac-y) 4 dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
5 dwmac100.o $(stmmac-y)
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
index e49e5188e887..2a58172e986a 100644
--- a/drivers/net/stmmac/common.h
+++ b/drivers/net/stmmac/common.h
@@ -23,132 +23,7 @@
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#include "descs.h" 25#include "descs.h"
26#include <linux/io.h> 26#include <linux/netdevice.h>
27
28/* *********************************************
29 DMA CRS Control and Status Register Mapping
30 * *********************************************/
31#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
32#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
33#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
34#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
35#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
36#define DMA_STATUS 0x00001014 /* Status Register */
37#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
38#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
39#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
40#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
41#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
42
43/* ********************************
44 DMA Control register defines
45 * ********************************/
46#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
47#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
48
49/* **************************************
50 DMA Interrupt Enable register defines
51 * **************************************/
52/**** NORMAL INTERRUPT ****/
53#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
54#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
55#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
56#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
57#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
58
59#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
60 DMA_INTR_ENA_TIE)
61
62/**** ABNORMAL INTERRUPT ****/
63#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
64#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
65#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
66#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
67#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
68#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
69#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
70#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
71#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
72#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
73
74#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
75 DMA_INTR_ENA_UNE)
76
77/* DMA default interrupt mask */
78#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
79
80/* ****************************
81 * DMA Status register defines
82 * ****************************/
83#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
84#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
85#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int. */
86#define DMA_STATUS_GMI 0x08000000
87#define DMA_STATUS_GLI 0x04000000
88#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
89#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
90#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
91#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
92#define DMA_STATUS_TS_SHIFT 20
93#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
94#define DMA_STATUS_RS_SHIFT 17
95#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
96#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
97#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
98#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
99#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
100#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
101#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
102#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
103#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
104#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
105#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
106#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
107#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
108#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
109#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
110
111/* Other defines */
112#define HASH_TABLE_SIZE 64
113#define PAUSE_TIME 0x200
114
115/* Flow Control defines */
116#define FLOW_OFF 0
117#define FLOW_RX 1
118#define FLOW_TX 2
119#define FLOW_AUTO (FLOW_TX | FLOW_RX)
120
121/* DMA STORE-AND-FORWARD Operation Mode */
122#define SF_DMA_MODE 1
123
124#define HW_CSUM 1
125#define NO_HW_CSUM 0
126
127/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
128#define BUF_SIZE_16KiB 16384
129#define BUF_SIZE_8KiB 8192
130#define BUF_SIZE_4KiB 4096
131#define BUF_SIZE_2KiB 2048
132
133/* Power Down and WOL */
134#define PMT_NOT_SUPPORTED 0
135#define PMT_SUPPORTED 1
136
137/* Common MAC defines */
138#define MAC_CTRL_REG 0x00000000 /* MAC Control */
139#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
140#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
141
142/* MAC Management Counters register */
143#define MMC_CONTROL 0x00000100 /* MMC Control */
144#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
145#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
146#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
147#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
148
149#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
150#define MMC_CONTROL_MAX_FRM_SHIFT 3
151#define MMC_CONTROL_MAX_FRAME 0x7FF
152 27
153struct stmmac_extra_stats { 28struct stmmac_extra_stats {
154 /* Transmit errors */ 29 /* Transmit errors */
@@ -169,7 +44,7 @@ struct stmmac_extra_stats {
169 unsigned long rx_toolong; 44 unsigned long rx_toolong;
170 unsigned long rx_collision; 45 unsigned long rx_collision;
171 unsigned long rx_crc; 46 unsigned long rx_crc;
172 unsigned long rx_lenght; 47 unsigned long rx_length;
173 unsigned long rx_mii; 48 unsigned long rx_mii;
174 unsigned long rx_multicast; 49 unsigned long rx_multicast;
175 unsigned long rx_gmac_overflow; 50 unsigned long rx_gmac_overflow;
@@ -198,66 +73,62 @@ struct stmmac_extra_stats {
198 unsigned long normal_irq_n; 73 unsigned long normal_irq_n;
199}; 74};
200 75
201/* GMAC core can compute the checksums in HW. */ 76#define HASH_TABLE_SIZE 64
202enum rx_frame_status { 77#define PAUSE_TIME 0x200
78
79/* Flow Control defines */
80#define FLOW_OFF 0
81#define FLOW_RX 1
82#define FLOW_TX 2
83#define FLOW_AUTO (FLOW_TX | FLOW_RX)
84
85#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
86
87#define HW_CSUM 1
88#define NO_HW_CSUM 0
89enum rx_frame_status { /* IPC status */
203 good_frame = 0, 90 good_frame = 0,
204 discard_frame = 1, 91 discard_frame = 1,
205 csum_none = 2, 92 csum_none = 2,
206}; 93};
207 94
208static inline void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6], 95enum tx_dma_irq_status {
209 unsigned int high, unsigned int low) 96 tx_hard_error = 1,
210{ 97 tx_hard_error_bump_tc = 2,
211 unsigned long data; 98 handle_tx_rx = 3,
212 99};
213 data = (addr[5] << 8) | addr[4];
214 writel(data, ioaddr + high);
215 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
216 writel(data, ioaddr + low);
217 100
218 return; 101/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
219} 102#define BUF_SIZE_16KiB 16384
103#define BUF_SIZE_8KiB 8192
104#define BUF_SIZE_4KiB 4096
105#define BUF_SIZE_2KiB 2048
220 106
221static inline void stmmac_get_mac_addr(unsigned long ioaddr, 107/* Power Down and WOL */
222 unsigned char *addr, unsigned int high, 108#define PMT_NOT_SUPPORTED 0
223 unsigned int low) 109#define PMT_SUPPORTED 1
224{
225 unsigned int hi_addr, lo_addr;
226 110
227 /* Read the MAC address from the hardware */ 111/* Common MAC defines */
228 hi_addr = readl(ioaddr + high); 112#define MAC_CTRL_REG 0x00000000 /* MAC Control */
229 lo_addr = readl(ioaddr + low); 113#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
114#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
230 115
231 /* Extract the MAC address from the high and low words */ 116/* MAC Management Counters register */
232 addr[0] = lo_addr & 0xff; 117#define MMC_CONTROL 0x00000100 /* MMC Control */
233 addr[1] = (lo_addr >> 8) & 0xff; 118#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
234 addr[2] = (lo_addr >> 16) & 0xff; 119#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
235 addr[3] = (lo_addr >> 24) & 0xff; 120#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
236 addr[4] = hi_addr & 0xff; 121#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
237 addr[5] = (hi_addr >> 8) & 0xff;
238 122
239 return; 123#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
240} 124#define MMC_CONTROL_MAX_FRM_SHIFT 3
125#define MMC_CONTROL_MAX_FRAME 0x7FF
241 126
242struct stmmac_ops { 127struct stmmac_desc_ops {
243 /* MAC core initialization */ 128 /* DMA RX descriptor ring initialization */
244 void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
245 /* DMA core initialization */
246 int (*dma_init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
247 /* Dump MAC registers */
248 void (*dump_mac_regs) (unsigned long ioaddr);
249 /* Dump DMA registers */
250 void (*dump_dma_regs) (unsigned long ioaddr);
251 /* Set tx/rx threshold in the csr6 register
252 * An invalid value enables the store-and-forward mode */
253 void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
254 /* To track extra statistic (if supported) */
255 void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
256 unsigned long ioaddr);
257 /* RX descriptor ring initialization */
258 void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size, 129 void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
259 int disable_rx_ic); 130 int disable_rx_ic);
260 /* TX descriptor ring initialization */ 131 /* DMA TX descriptor ring initialization */
261 void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size); 132 void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
262 133
263 /* Invoked by the xmit function to prepare the tx descriptor */ 134 /* Invoked by the xmit function to prepare the tx descriptor */
@@ -281,7 +152,6 @@ struct stmmac_ops {
281 /* Get the buffer size from the descriptor */ 152 /* Get the buffer size from the descriptor */
282 int (*get_tx_len) (struct dma_desc *p); 153 int (*get_tx_len) (struct dma_desc *p);
283 /* Handle extra events on specific interrupts hw dependent */ 154 /* Handle extra events on specific interrupts hw dependent */
284 void (*host_irq_status) (unsigned long ioaddr);
285 int (*get_rx_owner) (struct dma_desc *p); 155 int (*get_rx_owner) (struct dma_desc *p);
286 void (*set_rx_owner) (struct dma_desc *p); 156 void (*set_rx_owner) (struct dma_desc *p);
287 /* Get the receive frame size */ 157 /* Get the receive frame size */
@@ -289,6 +159,37 @@ struct stmmac_ops {
289 /* Return the reception status looking at the RDES1 */ 159 /* Return the reception status looking at the RDES1 */
290 int (*rx_status) (void *data, struct stmmac_extra_stats *x, 160 int (*rx_status) (void *data, struct stmmac_extra_stats *x,
291 struct dma_desc *p); 161 struct dma_desc *p);
162};
163
164struct stmmac_dma_ops {
165 /* DMA core initialization */
166 int (*init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
167 /* Dump DMA registers */
168 void (*dump_regs) (unsigned long ioaddr);
169 /* Set tx/rx threshold in the csr6 register
170 * An invalid value enables the store-and-forward mode */
171 void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
172 /* To track extra statistic (if supported) */
173 void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
174 unsigned long ioaddr);
175 void (*enable_dma_transmission) (unsigned long ioaddr);
176 void (*enable_dma_irq) (unsigned long ioaddr);
177 void (*disable_dma_irq) (unsigned long ioaddr);
178 void (*start_tx) (unsigned long ioaddr);
179 void (*stop_tx) (unsigned long ioaddr);
180 void (*start_rx) (unsigned long ioaddr);
181 void (*stop_rx) (unsigned long ioaddr);
182 int (*dma_interrupt) (unsigned long ioaddr,
183 struct stmmac_extra_stats *x);
184};
185
186struct stmmac_ops {
187 /* MAC core initialization */
188 void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
189 /* Dump MAC registers */
190 void (*dump_regs) (unsigned long ioaddr);
191 /* Handle extra events on specific interrupts hw dependent */
192 void (*host_irq_status) (unsigned long ioaddr);
292 /* Multicast filter setting */ 193 /* Multicast filter setting */
293 void (*set_filter) (struct net_device *dev); 194 void (*set_filter) (struct net_device *dev);
294 /* Flow control setting */ 195 /* Flow control setting */
@@ -298,9 +199,9 @@ struct stmmac_ops {
298 void (*pmt) (unsigned long ioaddr, unsigned long mode); 199 void (*pmt) (unsigned long ioaddr, unsigned long mode);
299 /* Set/Get Unicast MAC addresses */ 200 /* Set/Get Unicast MAC addresses */
300 void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr, 201 void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr,
301 unsigned int reg_n); 202 unsigned int reg_n);
302 void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr, 203 void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr,
303 unsigned int reg_n); 204 unsigned int reg_n);
304}; 205};
305 206
306struct mac_link { 207struct mac_link {
@@ -314,17 +215,19 @@ struct mii_regs {
314 unsigned int data; /* MII Data */ 215 unsigned int data; /* MII Data */
315}; 216};
316 217
317struct hw_cap { 218struct mac_device_info {
318 unsigned int version; /* Core Version register (GMAC) */ 219 struct stmmac_ops *mac;
319 unsigned int pmt; /* Power-Down mode (GMAC) */ 220 struct stmmac_desc_ops *desc;
221 struct stmmac_dma_ops *dma;
222 unsigned int pmt; /* support Power-Down */
223 struct mii_regs mii; /* MII register Addresses */
320 struct mac_link link; 224 struct mac_link link;
321 struct mii_regs mii;
322}; 225};
323 226
324struct mac_device_info { 227struct mac_device_info *dwmac1000_setup(unsigned long addr);
325 struct hw_cap hw; 228struct mac_device_info *dwmac100_setup(unsigned long addr);
326 struct stmmac_ops *ops;
327};
328 229
329struct mac_device_info *gmac_setup(unsigned long addr); 230extern void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
330struct mac_device_info *mac100_setup(unsigned long addr); 231 unsigned int high, unsigned int low);
232extern void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
233 unsigned int high, unsigned int low);
diff --git a/drivers/net/stmmac/descs.h b/drivers/net/stmmac/descs.h
index 6d2a0b2f5e57..63a03e264694 100644
--- a/drivers/net/stmmac/descs.h
+++ b/drivers/net/stmmac/descs.h
@@ -1,6 +1,6 @@
1/******************************************************************************* 1/*******************************************************************************
2 Header File to describe the DMA descriptors 2 Header File to describe the DMA descriptors.
3 Use enhanced descriptors in case of GMAC Cores. 3 Enhanced descriptors have been in case of DWMAC1000 Cores.
4 4
5 This program is free software; you can redistribute it and/or modify it 5 This program is free software; you can redistribute it and/or modify it
6 under the terms and conditions of the GNU General Public License, 6 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/stmmac/mac100.c b/drivers/net/stmmac/dwmac100.c
index 625171b6062b..803b0373d843 100644
--- a/drivers/net/stmmac/mac100.c
+++ b/drivers/net/stmmac/dwmac100.c
@@ -26,23 +26,23 @@
26 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 26 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
27*******************************************************************************/ 27*******************************************************************************/
28 28
29#include <linux/netdevice.h>
30#include <linux/crc32.h> 29#include <linux/crc32.h>
31#include <linux/mii.h> 30#include <linux/mii.h>
32#include <linux/phy.h> 31#include <linux/phy.h>
33 32
34#include "common.h" 33#include "common.h"
35#include "mac100.h" 34#include "dwmac100.h"
35#include "dwmac_dma.h"
36 36
37#undef MAC100_DEBUG 37#undef DWMAC100_DEBUG
38/*#define MAC100_DEBUG*/ 38/*#define DWMAC100_DEBUG*/
39#ifdef MAC100_DEBUG 39#ifdef DWMAC100_DEBUG
40#define DBG(fmt, args...) printk(fmt, ## args) 40#define DBG(fmt, args...) printk(fmt, ## args)
41#else 41#else
42#define DBG(fmt, args...) do { } while (0) 42#define DBG(fmt, args...) do { } while (0)
43#endif 43#endif
44 44
45static void mac100_core_init(unsigned long ioaddr) 45static void dwmac100_core_init(unsigned long ioaddr)
46{ 46{
47 u32 value = readl(ioaddr + MAC_CONTROL); 47 u32 value = readl(ioaddr + MAC_CONTROL);
48 48
@@ -54,43 +54,43 @@ static void mac100_core_init(unsigned long ioaddr)
54 return; 54 return;
55} 55}
56 56
57static void mac100_dump_mac_regs(unsigned long ioaddr) 57static void dwmac100_dump_mac_regs(unsigned long ioaddr)
58{ 58{
59 pr_info("\t----------------------------------------------\n" 59 pr_info("\t----------------------------------------------\n"
60 "\t MAC100 CSR (base addr = 0x%8x)\n" 60 "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
61 "\t----------------------------------------------\n", 61 "\t----------------------------------------------\n",
62 (unsigned int)ioaddr); 62 (unsigned int)ioaddr);
63 pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL, 63 pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
64 readl(ioaddr + MAC_CONTROL)); 64 readl(ioaddr + MAC_CONTROL));
65 pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH, 65 pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
66 readl(ioaddr + MAC_ADDR_HIGH)); 66 readl(ioaddr + MAC_ADDR_HIGH));
67 pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW, 67 pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
68 readl(ioaddr + MAC_ADDR_LOW)); 68 readl(ioaddr + MAC_ADDR_LOW));
69 pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n", 69 pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
70 MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH)); 70 MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
71 pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n", 71 pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
72 MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW)); 72 MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
73 pr_info("\tflow control (offset 0x%x): 0x%08x\n", 73 pr_info("\tflow control (offset 0x%x): 0x%08x\n",
74 MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL)); 74 MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
75 pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1, 75 pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
76 readl(ioaddr + MAC_VLAN1)); 76 readl(ioaddr + MAC_VLAN1));
77 pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2, 77 pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
78 readl(ioaddr + MAC_VLAN2)); 78 readl(ioaddr + MAC_VLAN2));
79 pr_info("\n\tMAC management counter registers\n"); 79 pr_info("\n\tMAC management counter registers\n");
80 pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n", 80 pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
81 MMC_CONTROL, readl(ioaddr + MMC_CONTROL)); 81 MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
82 pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n", 82 pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
83 MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR)); 83 MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
84 pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n", 84 pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
85 MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR)); 85 MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
86 pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n", 86 pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
87 MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK)); 87 MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
88 pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n", 88 pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
89 MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK)); 89 MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
90 return; 90 return;
91} 91}
92 92
93static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, 93static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
94 u32 dma_rx) 94 u32 dma_rx)
95{ 95{
96 u32 value = readl(ioaddr + DMA_BUS_MODE); 96 u32 value = readl(ioaddr + DMA_BUS_MODE);
@@ -117,7 +117,7 @@ static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
117/* Store and Forward capability is not used at all.. 117/* Store and Forward capability is not used at all..
118 * The transmit threshold can be programmed by 118 * The transmit threshold can be programmed by
119 * setting the TTC bits in the DMA control register.*/ 119 * setting the TTC bits in the DMA control register.*/
120static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode, 120static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
121 int rxmode) 121 int rxmode)
122{ 122{
123 u32 csr6 = readl(ioaddr + DMA_CONTROL); 123 u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -134,11 +134,11 @@ static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode,
134 return; 134 return;
135} 135}
136 136
137static void mac100_dump_dma_regs(unsigned long ioaddr) 137static void dwmac100_dump_dma_regs(unsigned long ioaddr)
138{ 138{
139 int i; 139 int i;
140 140
141 DBG(KERN_DEBUG "MAC100 DMA CSR \n"); 141 DBG(KERN_DEBUG "DWMAC 100 DMA CSR \n");
142 for (i = 0; i < 9; i++) 142 for (i = 0; i < 9; i++)
143 pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i, 143 pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
144 (DMA_BUS_MODE + i * 4), 144 (DMA_BUS_MODE + i * 4),
@@ -151,8 +151,9 @@ static void mac100_dump_dma_regs(unsigned long ioaddr)
151} 151}
152 152
153/* DMA controller has two counters to track the number of 153/* DMA controller has two counters to track the number of
154 the receive missed frames. */ 154 * the receive missed frames. */
155static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, 155static void dwmac100_dma_diagnostic_fr(void *data,
156 struct stmmac_extra_stats *x,
156 unsigned long ioaddr) 157 unsigned long ioaddr)
157{ 158{
158 struct net_device_stats *stats = (struct net_device_stats *)data; 159 struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -181,7 +182,8 @@ static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
181 return; 182 return;
182} 183}
183 184
184static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x, 185static int dwmac100_get_tx_frame_status(void *data,
186 struct stmmac_extra_stats *x,
185 struct dma_desc *p, unsigned long ioaddr) 187 struct dma_desc *p, unsigned long ioaddr)
186{ 188{
187 int ret = 0; 189 int ret = 0;
@@ -217,7 +219,7 @@ static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
217 return ret; 219 return ret;
218} 220}
219 221
220static int mac100_get_tx_len(struct dma_desc *p) 222static int dwmac100_get_tx_len(struct dma_desc *p)
221{ 223{
222 return p->des01.tx.buffer1_size; 224 return p->des01.tx.buffer1_size;
223} 225}
@@ -226,14 +228,15 @@ static int mac100_get_tx_len(struct dma_desc *p)
226 * and, if required, updates the multicast statistics. 228 * and, if required, updates the multicast statistics.
227 * In case of success, it returns csum_none becasue the device 229 * In case of success, it returns csum_none becasue the device
228 * is not able to compute the csum in HW. */ 230 * is not able to compute the csum in HW. */
229static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x, 231static int dwmac100_get_rx_frame_status(void *data,
232 struct stmmac_extra_stats *x,
230 struct dma_desc *p) 233 struct dma_desc *p)
231{ 234{
232 int ret = csum_none; 235 int ret = csum_none;
233 struct net_device_stats *stats = (struct net_device_stats *)data; 236 struct net_device_stats *stats = (struct net_device_stats *)data;
234 237
235 if (unlikely(p->des01.rx.last_descriptor == 0)) { 238 if (unlikely(p->des01.rx.last_descriptor == 0)) {
236 pr_warning("mac100 Error: Oversized Ethernet " 239 pr_warning("dwmac100 Error: Oversized Ethernet "
237 "frame spanned multiple buffers\n"); 240 "frame spanned multiple buffers\n");
238 stats->rx_length_errors++; 241 stats->rx_length_errors++;
239 return discard_frame; 242 return discard_frame;
@@ -262,7 +265,7 @@ static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
262 ret = discard_frame; 265 ret = discard_frame;
263 266
264 if (unlikely(p->des01.rx.length_error)) { 267 if (unlikely(p->des01.rx.length_error)) {
265 x->rx_lenght++; 268 x->rx_length++;
266 ret = discard_frame; 269 ret = discard_frame;
267 } 270 }
268 if (unlikely(p->des01.rx.mii_error)) { 271 if (unlikely(p->des01.rx.mii_error)) {
@@ -276,24 +279,24 @@ static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
276 return ret; 279 return ret;
277} 280}
278 281
279static void mac100_irq_status(unsigned long ioaddr) 282static void dwmac100_irq_status(unsigned long ioaddr)
280{ 283{
281 return; 284 return;
282} 285}
283 286
284static void mac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr, 287static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
285 unsigned int reg_n) 288 unsigned int reg_n)
286{ 289{
287 stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); 290 stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
288} 291}
289 292
290static void mac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr, 293static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
291 unsigned int reg_n) 294 unsigned int reg_n)
292{ 295{
293 stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); 296 stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
294} 297}
295 298
296static void mac100_set_filter(struct net_device *dev) 299static void dwmac100_set_filter(struct net_device *dev)
297{ 300{
298 unsigned long ioaddr = dev->base_addr; 301 unsigned long ioaddr = dev->base_addr;
299 u32 value = readl(ioaddr + MAC_CONTROL); 302 u32 value = readl(ioaddr + MAC_CONTROL);
@@ -302,29 +305,27 @@ static void mac100_set_filter(struct net_device *dev)
302 value |= MAC_CONTROL_PR; 305 value |= MAC_CONTROL_PR;
303 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO | 306 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
304 MAC_CONTROL_HP); 307 MAC_CONTROL_HP);
305 } else if ((dev->mc_count > HASH_TABLE_SIZE) 308 } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
306 || (dev->flags & IFF_ALLMULTI)) { 309 || (dev->flags & IFF_ALLMULTI)) {
307 value |= MAC_CONTROL_PM; 310 value |= MAC_CONTROL_PM;
308 value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO); 311 value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
309 writel(0xffffffff, ioaddr + MAC_HASH_HIGH); 312 writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
310 writel(0xffffffff, ioaddr + MAC_HASH_LOW); 313 writel(0xffffffff, ioaddr + MAC_HASH_LOW);
311 } else if (dev->mc_count == 0) { /* no multicast */ 314 } else if (netdev_mc_empty(dev)) { /* no multicast */
312 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF | 315 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
313 MAC_CONTROL_HO | MAC_CONTROL_HP); 316 MAC_CONTROL_HO | MAC_CONTROL_HP);
314 } else { 317 } else {
315 int i;
316 u32 mc_filter[2]; 318 u32 mc_filter[2];
317 struct dev_mc_list *mclist; 319 struct dev_mc_list *mclist;
318 320
319 /* Perfect filter mode for physical address and Hash 321 /* Perfect filter mode for physical address and Hash
320 filter for multicast */ 322 filter for multicast */
321 value |= MAC_CONTROL_HP; 323 value |= MAC_CONTROL_HP;
322 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF 324 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
323 | MAC_CONTROL_HO); 325 MAC_CONTROL_IF | MAC_CONTROL_HO);
324 326
325 memset(mc_filter, 0, sizeof(mc_filter)); 327 memset(mc_filter, 0, sizeof(mc_filter));
326 for (i = 0, mclist = dev->mc_list; 328 netdev_for_each_mc_addr(mclist, dev) {
327 mclist && i < dev->mc_count; i++, mclist = mclist->next) {
328 /* The upper 6 bits of the calculated CRC are used to 329 /* The upper 6 bits of the calculated CRC are used to
329 * index the contens of the hash table */ 330 * index the contens of the hash table */
330 int bit_nr = 331 int bit_nr =
@@ -347,7 +348,7 @@ static void mac100_set_filter(struct net_device *dev)
347 return; 348 return;
348} 349}
349 350
350static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex, 351static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
351 unsigned int fc, unsigned int pause_time) 352 unsigned int fc, unsigned int pause_time)
352{ 353{
353 unsigned int flow = MAC_FLOW_CTRL_ENABLE; 354 unsigned int flow = MAC_FLOW_CTRL_ENABLE;
@@ -359,13 +360,15 @@ static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
359 return; 360 return;
360} 361}
361 362
362/* No PMT module supported in our SoC for the Ethernet Controller. */ 363/* No PMT module supported for this Ethernet Controller.
363static void mac100_pmt(unsigned long ioaddr, unsigned long mode) 364 * Tested on ST platforms only.
365 */
366static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
364{ 367{
365 return; 368 return;
366} 369}
367 370
368static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size, 371static void dwmac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
369 int disable_rx_ic) 372 int disable_rx_ic)
370{ 373{
371 int i; 374 int i;
@@ -381,7 +384,7 @@ static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
381 return; 384 return;
382} 385}
383 386
384static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size) 387static void dwmac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
385{ 388{
386 int i; 389 int i;
387 for (i = 0; i < ring_size; i++) { 390 for (i = 0; i < ring_size; i++) {
@@ -393,32 +396,32 @@ static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
393 return; 396 return;
394} 397}
395 398
396static int mac100_get_tx_owner(struct dma_desc *p) 399static int dwmac100_get_tx_owner(struct dma_desc *p)
397{ 400{
398 return p->des01.tx.own; 401 return p->des01.tx.own;
399} 402}
400 403
401static int mac100_get_rx_owner(struct dma_desc *p) 404static int dwmac100_get_rx_owner(struct dma_desc *p)
402{ 405{
403 return p->des01.rx.own; 406 return p->des01.rx.own;
404} 407}
405 408
406static void mac100_set_tx_owner(struct dma_desc *p) 409static void dwmac100_set_tx_owner(struct dma_desc *p)
407{ 410{
408 p->des01.tx.own = 1; 411 p->des01.tx.own = 1;
409} 412}
410 413
411static void mac100_set_rx_owner(struct dma_desc *p) 414static void dwmac100_set_rx_owner(struct dma_desc *p)
412{ 415{
413 p->des01.rx.own = 1; 416 p->des01.rx.own = 1;
414} 417}
415 418
416static int mac100_get_tx_ls(struct dma_desc *p) 419static int dwmac100_get_tx_ls(struct dma_desc *p)
417{ 420{
418 return p->des01.tx.last_segment; 421 return p->des01.tx.last_segment;
419} 422}
420 423
421static void mac100_release_tx_desc(struct dma_desc *p) 424static void dwmac100_release_tx_desc(struct dma_desc *p)
422{ 425{
423 int ter = p->des01.tx.end_ring; 426 int ter = p->des01.tx.end_ring;
424 427
@@ -444,74 +447,91 @@ static void mac100_release_tx_desc(struct dma_desc *p)
444 return; 447 return;
445} 448}
446 449
447static void mac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, 450static void dwmac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
448 int csum_flag) 451 int csum_flag)
449{ 452{
450 p->des01.tx.first_segment = is_fs; 453 p->des01.tx.first_segment = is_fs;
451 p->des01.tx.buffer1_size = len; 454 p->des01.tx.buffer1_size = len;
452} 455}
453 456
454static void mac100_clear_tx_ic(struct dma_desc *p) 457static void dwmac100_clear_tx_ic(struct dma_desc *p)
455{ 458{
456 p->des01.tx.interrupt = 0; 459 p->des01.tx.interrupt = 0;
457} 460}
458 461
459static void mac100_close_tx_desc(struct dma_desc *p) 462static void dwmac100_close_tx_desc(struct dma_desc *p)
460{ 463{
461 p->des01.tx.last_segment = 1; 464 p->des01.tx.last_segment = 1;
462 p->des01.tx.interrupt = 1; 465 p->des01.tx.interrupt = 1;
463} 466}
464 467
465static int mac100_get_rx_frame_len(struct dma_desc *p) 468static int dwmac100_get_rx_frame_len(struct dma_desc *p)
466{ 469{
467 return p->des01.rx.frame_length; 470 return p->des01.rx.frame_length;
468} 471}
469 472
470struct stmmac_ops mac100_driver = { 473struct stmmac_ops dwmac100_ops = {
471 .core_init = mac100_core_init, 474 .core_init = dwmac100_core_init,
472 .dump_mac_regs = mac100_dump_mac_regs, 475 .dump_regs = dwmac100_dump_mac_regs,
473 .dma_init = mac100_dma_init, 476 .host_irq_status = dwmac100_irq_status,
474 .dump_dma_regs = mac100_dump_dma_regs, 477 .set_filter = dwmac100_set_filter,
475 .dma_mode = mac100_dma_operation_mode, 478 .flow_ctrl = dwmac100_flow_ctrl,
476 .dma_diagnostic_fr = mac100_dma_diagnostic_fr, 479 .pmt = dwmac100_pmt,
477 .tx_status = mac100_get_tx_frame_status, 480 .set_umac_addr = dwmac100_set_umac_addr,
478 .rx_status = mac100_get_rx_frame_status, 481 .get_umac_addr = dwmac100_get_umac_addr,
479 .get_tx_len = mac100_get_tx_len,
480 .set_filter = mac100_set_filter,
481 .flow_ctrl = mac100_flow_ctrl,
482 .pmt = mac100_pmt,
483 .init_rx_desc = mac100_init_rx_desc,
484 .init_tx_desc = mac100_init_tx_desc,
485 .get_tx_owner = mac100_get_tx_owner,
486 .get_rx_owner = mac100_get_rx_owner,
487 .release_tx_desc = mac100_release_tx_desc,
488 .prepare_tx_desc = mac100_prepare_tx_desc,
489 .clear_tx_ic = mac100_clear_tx_ic,
490 .close_tx_desc = mac100_close_tx_desc,
491 .get_tx_ls = mac100_get_tx_ls,
492 .set_tx_owner = mac100_set_tx_owner,
493 .set_rx_owner = mac100_set_rx_owner,
494 .get_rx_frame_len = mac100_get_rx_frame_len,
495 .host_irq_status = mac100_irq_status,
496 .set_umac_addr = mac100_set_umac_addr,
497 .get_umac_addr = mac100_get_umac_addr,
498}; 482};
499 483
500struct mac_device_info *mac100_setup(unsigned long ioaddr) 484struct stmmac_dma_ops dwmac100_dma_ops = {
485 .init = dwmac100_dma_init,
486 .dump_regs = dwmac100_dump_dma_regs,
487 .dma_mode = dwmac100_dma_operation_mode,
488 .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
489 .enable_dma_transmission = dwmac_enable_dma_transmission,
490 .enable_dma_irq = dwmac_enable_dma_irq,
491 .disable_dma_irq = dwmac_disable_dma_irq,
492 .start_tx = dwmac_dma_start_tx,
493 .stop_tx = dwmac_dma_stop_tx,
494 .start_rx = dwmac_dma_start_rx,
495 .stop_rx = dwmac_dma_stop_rx,
496 .dma_interrupt = dwmac_dma_interrupt,
497};
498
499struct stmmac_desc_ops dwmac100_desc_ops = {
500 .tx_status = dwmac100_get_tx_frame_status,
501 .rx_status = dwmac100_get_rx_frame_status,
502 .get_tx_len = dwmac100_get_tx_len,
503 .init_rx_desc = dwmac100_init_rx_desc,
504 .init_tx_desc = dwmac100_init_tx_desc,
505 .get_tx_owner = dwmac100_get_tx_owner,
506 .get_rx_owner = dwmac100_get_rx_owner,
507 .release_tx_desc = dwmac100_release_tx_desc,
508 .prepare_tx_desc = dwmac100_prepare_tx_desc,
509 .clear_tx_ic = dwmac100_clear_tx_ic,
510 .close_tx_desc = dwmac100_close_tx_desc,
511 .get_tx_ls = dwmac100_get_tx_ls,
512 .set_tx_owner = dwmac100_set_tx_owner,
513 .set_rx_owner = dwmac100_set_rx_owner,
514 .get_rx_frame_len = dwmac100_get_rx_frame_len,
515};
516
517struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
501{ 518{
502 struct mac_device_info *mac; 519 struct mac_device_info *mac;
503 520
504 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); 521 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
505 522
506 pr_info("\tMAC 10/100\n"); 523 pr_info("\tDWMAC100\n");
524
525 mac->mac = &dwmac100_ops;
526 mac->desc = &dwmac100_desc_ops;
527 mac->dma = &dwmac100_dma_ops;
507 528
508 mac->ops = &mac100_driver; 529 mac->pmt = PMT_NOT_SUPPORTED;
509 mac->hw.pmt = PMT_NOT_SUPPORTED; 530 mac->link.port = MAC_CONTROL_PS;
510 mac->hw.link.port = MAC_CONTROL_PS; 531 mac->link.duplex = MAC_CONTROL_F;
511 mac->hw.link.duplex = MAC_CONTROL_F; 532 mac->link.speed = 0;
512 mac->hw.link.speed = 0; 533 mac->mii.addr = MAC_MII_ADDR;
513 mac->hw.mii.addr = MAC_MII_ADDR; 534 mac->mii.data = MAC_MII_DATA;
514 mac->hw.mii.data = MAC_MII_DATA;
515 535
516 return mac; 536 return mac;
517} 537}
diff --git a/drivers/net/stmmac/mac100.h b/drivers/net/stmmac/dwmac100.h
index 0f8f110d004a..0f8f110d004a 100644
--- a/drivers/net/stmmac/mac100.h
+++ b/drivers/net/stmmac/dwmac100.h
diff --git a/drivers/net/stmmac/gmac.h b/drivers/net/stmmac/dwmac1000.h
index 2e82d6c9a148..62dca0e384e7 100644
--- a/drivers/net/stmmac/gmac.h
+++ b/drivers/net/stmmac/dwmac1000.h
@@ -20,6 +20,9 @@
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/ 21*******************************************************************************/
22 22
23#include <linux/phy.h>
24#include "common.h"
25
23#define GMAC_CONTROL 0x00000000 /* Configuration */ 26#define GMAC_CONTROL 0x00000000 /* Configuration */
24#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */ 27#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */
25#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */ 28#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
@@ -32,7 +35,7 @@
32#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */ 35#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
33 36
34#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */ 37#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
35enum gmac_irq_status { 38enum dwmac1000_irq_status {
36 time_stamp_irq = 0x0200, 39 time_stamp_irq = 0x0200,
37 mmc_rx_csum_offload_irq = 0x0080, 40 mmc_rx_csum_offload_irq = 0x0080,
38 mmc_tx_irq = 0x0040, 41 mmc_tx_irq = 0x0040,
@@ -202,3 +205,16 @@ enum rtc_control {
202#define GMAC_MMC_RX_INTR 0x104 205#define GMAC_MMC_RX_INTR 0x104
203#define GMAC_MMC_TX_INTR 0x108 206#define GMAC_MMC_TX_INTR 0x108
204#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 207#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
208
209#undef DWMAC1000_DEBUG
210/* #define DWMAC1000__DEBUG */
211#undef FRAME_FILTER_DEBUG
212/* #define FRAME_FILTER_DEBUG */
213#ifdef DWMAC1000__DEBUG
214#define DBG(fmt, args...) printk(fmt, ## args)
215#else
216#define DBG(fmt, args...) do { } while (0)
217#endif
218
219extern struct stmmac_dma_ops dwmac1000_dma_ops;
220extern struct stmmac_desc_ops dwmac1000_desc_ops;
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
new file mode 100644
index 000000000000..a6538ae4694c
--- /dev/null
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -0,0 +1,243 @@
1/*******************************************************************************
2 This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
3 DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
4 developing this code.
5
6 This only implements the mac core functions for this chip.
7
8 Copyright (C) 2007-2009 STMicroelectronics Ltd
9
10 This program is free software; you can redistribute it and/or modify it
11 under the terms and conditions of the GNU General Public License,
12 version 2, as published by the Free Software Foundation.
13
14 This program is distributed in the hope it will be useful, but WITHOUT
15 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 more details.
18
19 You should have received a copy of the GNU General Public License along with
20 this program; if not, write to the Free Software Foundation, Inc.,
21 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22
23 The full GNU General Public License is included in this distribution in
24 the file called "COPYING".
25
26 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
27*******************************************************************************/
28
29#include <linux/crc32.h>
30#include "dwmac1000.h"
31
32static void dwmac1000_core_init(unsigned long ioaddr)
33{
34 u32 value = readl(ioaddr + GMAC_CONTROL);
35 value |= GMAC_CORE_INIT;
36 writel(value, ioaddr + GMAC_CONTROL);
37
38 /* STBus Bridge Configuration */
39 /*writel(0xc5608, ioaddr + 0x00007000);*/
40
41 /* Freeze MMC counters */
42 writel(0x8, ioaddr + GMAC_MMC_CTRL);
43 /* Mask GMAC interrupts */
44 writel(0x207, ioaddr + GMAC_INT_MASK);
45
46#ifdef STMMAC_VLAN_TAG_USED
47 /* Tag detection without filtering */
48 writel(0x0, ioaddr + GMAC_VLAN_TAG);
49#endif
50 return;
51}
52
53static void dwmac1000_dump_regs(unsigned long ioaddr)
54{
55 int i;
56 pr_info("\tDWMAC1000 regs (base addr = 0x%8x)\n", (unsigned int)ioaddr);
57
58 for (i = 0; i < 55; i++) {
59 int offset = i * 4;
60 pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
61 offset, readl(ioaddr + offset));
62 }
63 return;
64}
65
66static void dwmac1000_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
67 unsigned int reg_n)
68{
69 stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
70 GMAC_ADDR_LOW(reg_n));
71}
72
73static void dwmac1000_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
74 unsigned int reg_n)
75{
76 stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
77 GMAC_ADDR_LOW(reg_n));
78}
79
80static void dwmac1000_set_filter(struct net_device *dev)
81{
82 unsigned long ioaddr = dev->base_addr;
83 unsigned int value = 0;
84
85 DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
86 __func__, netdev_mc_count(dev), netdev_uc_count(dev));
87
88 if (dev->flags & IFF_PROMISC)
89 value = GMAC_FRAME_FILTER_PR;
90 else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
91 || (dev->flags & IFF_ALLMULTI)) {
92 value = GMAC_FRAME_FILTER_PM; /* pass all multi */
93 writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
94 writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
95 } else if (!netdev_mc_empty(dev)) {
96 u32 mc_filter[2];
97 struct dev_mc_list *mclist;
98
99 /* Hash filter for multicast */
100 value = GMAC_FRAME_FILTER_HMC;
101
102 memset(mc_filter, 0, sizeof(mc_filter));
103 netdev_for_each_mc_addr(mclist, dev) {
104 /* The upper 6 bits of the calculated CRC are used to
105 index the contens of the hash table */
106 int bit_nr =
107 bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
108 /* The most significant bit determines the register to
109 * use (H/L) while the other 5 bits determine the bit
110 * within the register. */
111 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
112 }
113 writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
114 writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
115 }
116
117 /* Handle multiple unicast addresses (perfect filtering)*/
118 if (netdev_uc_count(dev) > GMAC_MAX_UNICAST_ADDRESSES)
119 /* Switch to promiscuous mode is more than 16 addrs
120 are required */
121 value |= GMAC_FRAME_FILTER_PR;
122 else {
123 int reg = 1;
124 struct netdev_hw_addr *ha;
125
126 netdev_for_each_uc_addr(ha, dev) {
127 dwmac1000_set_umac_addr(ioaddr, ha->addr, reg);
128 reg++;
129 }
130 }
131
132#ifdef FRAME_FILTER_DEBUG
133 /* Enable Receive all mode (to debug filtering_fail errors) */
134 value |= GMAC_FRAME_FILTER_RA;
135#endif
136 writel(value, ioaddr + GMAC_FRAME_FILTER);
137
138 DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
139 "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
140 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
141
142 return;
143}
144
145static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
146 unsigned int fc, unsigned int pause_time)
147{
148 unsigned int flow = 0;
149
150 DBG(KERN_DEBUG "GMAC Flow-Control:\n");
151 if (fc & FLOW_RX) {
152 DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
153 flow |= GMAC_FLOW_CTRL_RFE;
154 }
155 if (fc & FLOW_TX) {
156 DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
157 flow |= GMAC_FLOW_CTRL_TFE;
158 }
159
160 if (duplex) {
161 DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
162 flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
163 }
164
165 writel(flow, ioaddr + GMAC_FLOW_CTRL);
166 return;
167}
168
169static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
170{
171 unsigned int pmt = 0;
172
173 if (mode == WAKE_MAGIC) {
174 DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
175 pmt |= power_down | magic_pkt_en;
176 } else if (mode == WAKE_UCAST) {
177 DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
178 pmt |= global_unicast;
179 }
180
181 writel(pmt, ioaddr + GMAC_PMT);
182 return;
183}
184
185
186static void dwmac1000_irq_status(unsigned long ioaddr)
187{
188 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
189
190 /* Not used events (e.g. MMC interrupts) are not handled. */
191 if ((intr_status & mmc_tx_irq))
192 DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
193 readl(ioaddr + GMAC_MMC_TX_INTR));
194 if (unlikely(intr_status & mmc_rx_irq))
195 DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
196 readl(ioaddr + GMAC_MMC_RX_INTR));
197 if (unlikely(intr_status & mmc_rx_csum_offload_irq))
198 DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
199 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
200 if (unlikely(intr_status & pmt_irq)) {
201 DBG(KERN_DEBUG "GMAC: received Magic frame\n");
202 /* clear the PMT bits 5 and 6 by reading the PMT
203 * status register. */
204 readl(ioaddr + GMAC_PMT);
205 }
206
207 return;
208}
209
210struct stmmac_ops dwmac1000_ops = {
211 .core_init = dwmac1000_core_init,
212 .dump_regs = dwmac1000_dump_regs,
213 .host_irq_status = dwmac1000_irq_status,
214 .set_filter = dwmac1000_set_filter,
215 .flow_ctrl = dwmac1000_flow_ctrl,
216 .pmt = dwmac1000_pmt,
217 .set_umac_addr = dwmac1000_set_umac_addr,
218 .get_umac_addr = dwmac1000_get_umac_addr,
219};
220
221struct mac_device_info *dwmac1000_setup(unsigned long ioaddr)
222{
223 struct mac_device_info *mac;
224 u32 uid = readl(ioaddr + GMAC_VERSION);
225
226 pr_info("\tDWMAC1000 - user ID: 0x%x, Synopsys ID: 0x%x\n",
227 ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
228
229 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
230
231 mac->mac = &dwmac1000_ops;
232 mac->desc = &dwmac1000_desc_ops;
233 mac->dma = &dwmac1000_dma_ops;
234
235 mac->pmt = PMT_SUPPORTED;
236 mac->link.port = GMAC_CONTROL_PS;
237 mac->link.duplex = GMAC_CONTROL_DM;
238 mac->link.speed = GMAC_CONTROL_FES;
239 mac->mii.addr = GMAC_MII_ADDR;
240 mac->mii.data = GMAC_MII_DATA;
241
242 return mac;
243}
diff --git a/drivers/net/stmmac/gmac.c b/drivers/net/stmmac/dwmac1000_dma.c
index 52586ee68953..39d436a2da68 100644
--- a/drivers/net/stmmac/gmac.c
+++ b/drivers/net/stmmac/dwmac1000_dma.c
@@ -3,6 +3,8 @@
3 DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for 3 DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
4 developing this code. 4 developing this code.
5 5
6 This contains the functions to handle the dma and descriptors.
7
6 Copyright (C) 2007-2009 STMicroelectronics Ltd 8 Copyright (C) 2007-2009 STMicroelectronics Ltd
7 9
8 This program is free software; you can redistribute it and/or modify it 10 This program is free software; you can redistribute it and/or modify it
@@ -24,41 +26,11 @@
24 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 26 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
25*******************************************************************************/ 27*******************************************************************************/
26 28
27#include <linux/netdevice.h> 29#include "dwmac1000.h"
28#include <linux/crc32.h> 30#include "dwmac_dma.h"
29#include <linux/mii.h>
30#include <linux/phy.h>
31
32#include "stmmac.h"
33#include "gmac.h"
34
35#undef GMAC_DEBUG
36/*#define GMAC_DEBUG*/
37#undef FRAME_FILTER_DEBUG
38/*#define FRAME_FILTER_DEBUG*/
39#ifdef GMAC_DEBUG
40#define DBG(fmt, args...) printk(fmt, ## args)
41#else
42#define DBG(fmt, args...) do { } while (0)
43#endif
44 31
45static void gmac_dump_regs(unsigned long ioaddr) 32static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
46{ 33 u32 dma_rx)
47 int i;
48 pr_info("\t----------------------------------------------\n"
49 "\t GMAC registers (base addr = 0x%8x)\n"
50 "\t----------------------------------------------\n",
51 (unsigned int)ioaddr);
52
53 for (i = 0; i < 55; i++) {
54 int offset = i * 4;
55 pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
56 offset, readl(ioaddr + offset));
57 }
58 return;
59}
60
61static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
62{ 34{
63 u32 value = readl(ioaddr + DMA_BUS_MODE); 35 u32 value = readl(ioaddr + DMA_BUS_MODE);
64 /* DMA SW reset */ 36 /* DMA SW reset */
@@ -87,7 +59,7 @@ static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
87} 59}
88 60
89/* Transmit FIFO flush operation */ 61/* Transmit FIFO flush operation */
90static void gmac_flush_tx_fifo(unsigned long ioaddr) 62static void dwmac1000_flush_tx_fifo(unsigned long ioaddr)
91{ 63{
92 u32 csr6 = readl(ioaddr + DMA_CONTROL); 64 u32 csr6 = readl(ioaddr + DMA_CONTROL);
93 writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL); 65 writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
@@ -95,7 +67,7 @@ static void gmac_flush_tx_fifo(unsigned long ioaddr)
95 do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF)); 67 do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
96} 68}
97 69
98static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode, 70static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
99 int rxmode) 71 int rxmode)
100{ 72{
101 u32 csr6 = readl(ioaddr + DMA_CONTROL); 73 u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -148,13 +120,13 @@ static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
148} 120}
149 121
150/* Not yet implemented --- no RMON module */ 122/* Not yet implemented --- no RMON module */
151static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, 123static void dwmac1000_dma_diagnostic_fr(void *data,
152 unsigned long ioaddr) 124 struct stmmac_extra_stats *x, unsigned long ioaddr)
153{ 125{
154 return; 126 return;
155} 127}
156 128
157static void gmac_dump_dma_regs(unsigned long ioaddr) 129static void dwmac1000_dump_dma_regs(unsigned long ioaddr)
158{ 130{
159 int i; 131 int i;
160 pr_info(" DMA registers\n"); 132 pr_info(" DMA registers\n");
@@ -169,8 +141,9 @@ static void gmac_dump_dma_regs(unsigned long ioaddr)
169 return; 141 return;
170} 142}
171 143
172static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x, 144static int dwmac1000_get_tx_frame_status(void *data,
173 struct dma_desc *p, unsigned long ioaddr) 145 struct stmmac_extra_stats *x,
146 struct dma_desc *p, unsigned long ioaddr)
174{ 147{
175 int ret = 0; 148 int ret = 0;
176 struct net_device_stats *stats = (struct net_device_stats *)data; 149 struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -185,7 +158,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
185 if (unlikely(p->des01.etx.frame_flushed)) { 158 if (unlikely(p->des01.etx.frame_flushed)) {
186 DBG(KERN_ERR "\tframe_flushed error\n"); 159 DBG(KERN_ERR "\tframe_flushed error\n");
187 x->tx_frame_flushed++; 160 x->tx_frame_flushed++;
188 gmac_flush_tx_fifo(ioaddr); 161 dwmac1000_flush_tx_fifo(ioaddr);
189 } 162 }
190 163
191 if (unlikely(p->des01.etx.loss_carrier)) { 164 if (unlikely(p->des01.etx.loss_carrier)) {
@@ -213,7 +186,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
213 186
214 if (unlikely(p->des01.etx.underflow_error)) { 187 if (unlikely(p->des01.etx.underflow_error)) {
215 DBG(KERN_ERR "\tunderflow error\n"); 188 DBG(KERN_ERR "\tunderflow error\n");
216 gmac_flush_tx_fifo(ioaddr); 189 dwmac1000_flush_tx_fifo(ioaddr);
217 x->tx_underflow++; 190 x->tx_underflow++;
218 } 191 }
219 192
@@ -225,7 +198,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
225 if (unlikely(p->des01.etx.payload_error)) { 198 if (unlikely(p->des01.etx.payload_error)) {
226 DBG(KERN_ERR "\tAddr/Payload csum error\n"); 199 DBG(KERN_ERR "\tAddr/Payload csum error\n");
227 x->tx_payload_error++; 200 x->tx_payload_error++;
228 gmac_flush_tx_fifo(ioaddr); 201 dwmac1000_flush_tx_fifo(ioaddr);
229 } 202 }
230 203
231 ret = -1; 204 ret = -1;
@@ -245,19 +218,19 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
245 return ret; 218 return ret;
246} 219}
247 220
248static int gmac_get_tx_len(struct dma_desc *p) 221static int dwmac1000_get_tx_len(struct dma_desc *p)
249{ 222{
250 return p->des01.etx.buffer1_size; 223 return p->des01.etx.buffer1_size;
251} 224}
252 225
253static int gmac_coe_rdes0(int ipc_err, int type, int payload_err) 226static int dwmac1000_coe_rdes0(int ipc_err, int type, int payload_err)
254{ 227{
255 int ret = good_frame; 228 int ret = good_frame;
256 u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7; 229 u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
257 230
258 /* bits 5 7 0 | Frame status 231 /* bits 5 7 0 | Frame status
259 * ---------------------------------------------------------- 232 * ----------------------------------------------------------
260 * 0 0 0 | IEEE 802.3 Type frame (lenght < 1536 octects) 233 * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octects)
261 * 1 0 0 | IPv4/6 No CSUM errorS. 234 * 1 0 0 | IPv4/6 No CSUM errorS.
262 * 1 0 1 | IPv4/6 CSUM PAYLOAD error 235 * 1 0 1 | IPv4/6 CSUM PAYLOAD error
263 * 1 1 0 | IPv4/6 CSUM IP HR error 236 * 1 1 0 | IPv4/6 CSUM IP HR error
@@ -293,8 +266,8 @@ static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
293 return ret; 266 return ret;
294} 267}
295 268
296static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x, 269static int dwmac1000_get_rx_frame_status(void *data,
297 struct dma_desc *p) 270 struct stmmac_extra_stats *x, struct dma_desc *p)
298{ 271{
299 int ret = good_frame; 272 int ret = good_frame;
300 struct net_device_stats *stats = (struct net_device_stats *)data; 273 struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -339,7 +312,7 @@ static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
339 * It doesn't match with the information reported into the databook. 312 * It doesn't match with the information reported into the databook.
340 * At any rate, we need to understand if the CSUM hw computation is ok 313 * At any rate, we need to understand if the CSUM hw computation is ok
341 * and report this info to the upper layers. */ 314 * and report this info to the upper layers. */
342 ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error, 315 ret = dwmac1000_coe_rdes0(p->des01.erx.ipc_csum_error,
343 p->des01.erx.frame_type, p->des01.erx.payload_csum_error); 316 p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
344 317
345 if (unlikely(p->des01.erx.dribbling)) { 318 if (unlikely(p->des01.erx.dribbling)) {
@@ -358,7 +331,7 @@ static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
358 } 331 }
359 if (unlikely(p->des01.erx.length_error)) { 332 if (unlikely(p->des01.erx.length_error)) {
360 DBG(KERN_ERR "GMAC RX: length_error error\n"); 333 DBG(KERN_ERR "GMAC RX: length_error error\n");
361 x->rx_lenght++; 334 x->rx_length++;
362 ret = discard_frame; 335 ret = discard_frame;
363 } 336 }
364#ifdef STMMAC_VLAN_TAG_USED 337#ifdef STMMAC_VLAN_TAG_USED
@@ -370,181 +343,7 @@ static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
370 return ret; 343 return ret;
371} 344}
372 345
373static void gmac_irq_status(unsigned long ioaddr) 346static void dwmac1000_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
374{
375 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
376
377 /* Not used events (e.g. MMC interrupts) are not handled. */
378 if ((intr_status & mmc_tx_irq))
379 DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
380 readl(ioaddr + GMAC_MMC_TX_INTR));
381 if (unlikely(intr_status & mmc_rx_irq))
382 DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
383 readl(ioaddr + GMAC_MMC_RX_INTR));
384 if (unlikely(intr_status & mmc_rx_csum_offload_irq))
385 DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
386 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
387 if (unlikely(intr_status & pmt_irq)) {
388 DBG(KERN_DEBUG "GMAC: received Magic frame\n");
389 /* clear the PMT bits 5 and 6 by reading the PMT
390 * status register. */
391 readl(ioaddr + GMAC_PMT);
392 }
393
394 return;
395}
396
397static void gmac_core_init(unsigned long ioaddr)
398{
399 u32 value = readl(ioaddr + GMAC_CONTROL);
400 value |= GMAC_CORE_INIT;
401 writel(value, ioaddr + GMAC_CONTROL);
402
403 /* STBus Bridge Configuration */
404 /*writel(0xc5608, ioaddr + 0x00007000);*/
405
406 /* Freeze MMC counters */
407 writel(0x8, ioaddr + GMAC_MMC_CTRL);
408 /* Mask GMAC interrupts */
409 writel(0x207, ioaddr + GMAC_INT_MASK);
410
411#ifdef STMMAC_VLAN_TAG_USED
412 /* Tag detection without filtering */
413 writel(0x0, ioaddr + GMAC_VLAN_TAG);
414#endif
415 return;
416}
417
418static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
419 unsigned int reg_n)
420{
421 stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
422 GMAC_ADDR_LOW(reg_n));
423}
424
425static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
426 unsigned int reg_n)
427{
428 stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
429 GMAC_ADDR_LOW(reg_n));
430}
431
432static void gmac_set_filter(struct net_device *dev)
433{
434 unsigned long ioaddr = dev->base_addr;
435 unsigned int value = 0;
436
437 DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
438 __func__, dev->mc_count, dev->uc_count);
439
440 if (dev->flags & IFF_PROMISC)
441 value = GMAC_FRAME_FILTER_PR;
442 else if ((dev->mc_count > HASH_TABLE_SIZE)
443 || (dev->flags & IFF_ALLMULTI)) {
444 value = GMAC_FRAME_FILTER_PM; /* pass all multi */
445 writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
446 writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
447 } else if (dev->mc_count > 0) {
448 int i;
449 u32 mc_filter[2];
450 struct dev_mc_list *mclist;
451
452 /* Hash filter for multicast */
453 value = GMAC_FRAME_FILTER_HMC;
454
455 memset(mc_filter, 0, sizeof(mc_filter));
456 for (i = 0, mclist = dev->mc_list;
457 mclist && i < dev->mc_count; i++, mclist = mclist->next) {
458 /* The upper 6 bits of the calculated CRC are used to
459 index the contens of the hash table */
460 int bit_nr =
461 bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
462 /* The most significant bit determines the register to
463 * use (H/L) while the other 5 bits determine the bit
464 * within the register. */
465 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
466 }
467 writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
468 writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
469 }
470
471 /* Handle multiple unicast addresses (perfect filtering)*/
472 if (dev->uc_count > GMAC_MAX_UNICAST_ADDRESSES)
473 /* Switch to promiscuous mode is more than 16 addrs
474 are required */
475 value |= GMAC_FRAME_FILTER_PR;
476 else {
477 int i;
478 struct dev_addr_list *uc_ptr = dev->uc_list;
479
480 for (i = 0; i < dev->uc_count; i++) {
481 gmac_set_umac_addr(ioaddr, uc_ptr->da_addr,
482 i + 1);
483
484 DBG(KERN_INFO "\t%d "
485 "- Unicast addr %02x:%02x:%02x:%02x:%02x:"
486 "%02x\n", i + 1,
487 uc_ptr->da_addr[0], uc_ptr->da_addr[1],
488 uc_ptr->da_addr[2], uc_ptr->da_addr[3],
489 uc_ptr->da_addr[4], uc_ptr->da_addr[5]);
490 uc_ptr = uc_ptr->next;
491 }
492 }
493
494#ifdef FRAME_FILTER_DEBUG
495 /* Enable Receive all mode (to debug filtering_fail errors) */
496 value |= GMAC_FRAME_FILTER_RA;
497#endif
498 writel(value, ioaddr + GMAC_FRAME_FILTER);
499
500 DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
501 "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
502 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
503
504 return;
505}
506
507static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
508 unsigned int fc, unsigned int pause_time)
509{
510 unsigned int flow = 0;
511
512 DBG(KERN_DEBUG "GMAC Flow-Control:\n");
513 if (fc & FLOW_RX) {
514 DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
515 flow |= GMAC_FLOW_CTRL_RFE;
516 }
517 if (fc & FLOW_TX) {
518 DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
519 flow |= GMAC_FLOW_CTRL_TFE;
520 }
521
522 if (duplex) {
523 DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
524 flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
525 }
526
527 writel(flow, ioaddr + GMAC_FLOW_CTRL);
528 return;
529}
530
531static void gmac_pmt(unsigned long ioaddr, unsigned long mode)
532{
533 unsigned int pmt = 0;
534
535 if (mode == WAKE_MAGIC) {
536 DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
537 pmt |= power_down | magic_pkt_en;
538 } else if (mode == WAKE_UCAST) {
539 DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
540 pmt |= global_unicast;
541 }
542
543 writel(pmt, ioaddr + GMAC_PMT);
544 return;
545}
546
547static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
548 int disable_rx_ic) 347 int disable_rx_ic)
549{ 348{
550 int i; 349 int i;
@@ -562,7 +361,7 @@ static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
562 return; 361 return;
563} 362}
564 363
565static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size) 364static void dwmac1000_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
566{ 365{
567 int i; 366 int i;
568 367
@@ -576,32 +375,32 @@ static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
576 return; 375 return;
577} 376}
578 377
579static int gmac_get_tx_owner(struct dma_desc *p) 378static int dwmac1000_get_tx_owner(struct dma_desc *p)
580{ 379{
581 return p->des01.etx.own; 380 return p->des01.etx.own;
582} 381}
583 382
584static int gmac_get_rx_owner(struct dma_desc *p) 383static int dwmac1000_get_rx_owner(struct dma_desc *p)
585{ 384{
586 return p->des01.erx.own; 385 return p->des01.erx.own;
587} 386}
588 387
589static void gmac_set_tx_owner(struct dma_desc *p) 388static void dwmac1000_set_tx_owner(struct dma_desc *p)
590{ 389{
591 p->des01.etx.own = 1; 390 p->des01.etx.own = 1;
592} 391}
593 392
594static void gmac_set_rx_owner(struct dma_desc *p) 393static void dwmac1000_set_rx_owner(struct dma_desc *p)
595{ 394{
596 p->des01.erx.own = 1; 395 p->des01.erx.own = 1;
597} 396}
598 397
599static int gmac_get_tx_ls(struct dma_desc *p) 398static int dwmac1000_get_tx_ls(struct dma_desc *p)
600{ 399{
601 return p->des01.etx.last_segment; 400 return p->des01.etx.last_segment;
602} 401}
603 402
604static void gmac_release_tx_desc(struct dma_desc *p) 403static void dwmac1000_release_tx_desc(struct dma_desc *p)
605{ 404{
606 int ter = p->des01.etx.end_ring; 405 int ter = p->des01.etx.end_ring;
607 406
@@ -611,7 +410,7 @@ static void gmac_release_tx_desc(struct dma_desc *p)
611 return; 410 return;
612} 411}
613 412
614static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, 413static void dwmac1000_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
615 int csum_flag) 414 int csum_flag)
616{ 415{
617 p->des01.etx.first_segment = is_fs; 416 p->des01.etx.first_segment = is_fs;
@@ -625,69 +424,51 @@ static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
625 p->des01.etx.checksum_insertion = cic_full; 424 p->des01.etx.checksum_insertion = cic_full;
626} 425}
627 426
628static void gmac_clear_tx_ic(struct dma_desc *p) 427static void dwmac1000_clear_tx_ic(struct dma_desc *p)
629{ 428{
630 p->des01.etx.interrupt = 0; 429 p->des01.etx.interrupt = 0;
631} 430}
632 431
633static void gmac_close_tx_desc(struct dma_desc *p) 432static void dwmac1000_close_tx_desc(struct dma_desc *p)
634{ 433{
635 p->des01.etx.last_segment = 1; 434 p->des01.etx.last_segment = 1;
636 p->des01.etx.interrupt = 1; 435 p->des01.etx.interrupt = 1;
637} 436}
638 437
639static int gmac_get_rx_frame_len(struct dma_desc *p) 438static int dwmac1000_get_rx_frame_len(struct dma_desc *p)
640{ 439{
641 return p->des01.erx.frame_length; 440 return p->des01.erx.frame_length;
642} 441}
643 442
644struct stmmac_ops gmac_driver = { 443struct stmmac_dma_ops dwmac1000_dma_ops = {
645 .core_init = gmac_core_init, 444 .init = dwmac1000_dma_init,
646 .dump_mac_regs = gmac_dump_regs, 445 .dump_regs = dwmac1000_dump_dma_regs,
647 .dma_init = gmac_dma_init, 446 .dma_mode = dwmac1000_dma_operation_mode,
648 .dump_dma_regs = gmac_dump_dma_regs, 447 .dma_diagnostic_fr = dwmac1000_dma_diagnostic_fr,
649 .dma_mode = gmac_dma_operation_mode, 448 .enable_dma_transmission = dwmac_enable_dma_transmission,
650 .dma_diagnostic_fr = gmac_dma_diagnostic_fr, 449 .enable_dma_irq = dwmac_enable_dma_irq,
651 .tx_status = gmac_get_tx_frame_status, 450 .disable_dma_irq = dwmac_disable_dma_irq,
652 .rx_status = gmac_get_rx_frame_status, 451 .start_tx = dwmac_dma_start_tx,
653 .get_tx_len = gmac_get_tx_len, 452 .stop_tx = dwmac_dma_stop_tx,
654 .set_filter = gmac_set_filter, 453 .start_rx = dwmac_dma_start_rx,
655 .flow_ctrl = gmac_flow_ctrl, 454 .stop_rx = dwmac_dma_stop_rx,
656 .pmt = gmac_pmt, 455 .dma_interrupt = dwmac_dma_interrupt,
657 .init_rx_desc = gmac_init_rx_desc,
658 .init_tx_desc = gmac_init_tx_desc,
659 .get_tx_owner = gmac_get_tx_owner,
660 .get_rx_owner = gmac_get_rx_owner,
661 .release_tx_desc = gmac_release_tx_desc,
662 .prepare_tx_desc = gmac_prepare_tx_desc,
663 .clear_tx_ic = gmac_clear_tx_ic,
664 .close_tx_desc = gmac_close_tx_desc,
665 .get_tx_ls = gmac_get_tx_ls,
666 .set_tx_owner = gmac_set_tx_owner,
667 .set_rx_owner = gmac_set_rx_owner,
668 .get_rx_frame_len = gmac_get_rx_frame_len,
669 .host_irq_status = gmac_irq_status,
670 .set_umac_addr = gmac_set_umac_addr,
671 .get_umac_addr = gmac_get_umac_addr,
672}; 456};
673 457
674struct mac_device_info *gmac_setup(unsigned long ioaddr) 458struct stmmac_desc_ops dwmac1000_desc_ops = {
675{ 459 .tx_status = dwmac1000_get_tx_frame_status,
676 struct mac_device_info *mac; 460 .rx_status = dwmac1000_get_rx_frame_status,
677 u32 uid = readl(ioaddr + GMAC_VERSION); 461 .get_tx_len = dwmac1000_get_tx_len,
678 462 .init_rx_desc = dwmac1000_init_rx_desc,
679 pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n", 463 .init_tx_desc = dwmac1000_init_tx_desc,
680 ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff)); 464 .get_tx_owner = dwmac1000_get_tx_owner,
681 465 .get_rx_owner = dwmac1000_get_rx_owner,
682 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); 466 .release_tx_desc = dwmac1000_release_tx_desc,
683 467 .prepare_tx_desc = dwmac1000_prepare_tx_desc,
684 mac->ops = &gmac_driver; 468 .clear_tx_ic = dwmac1000_clear_tx_ic,
685 mac->hw.pmt = PMT_SUPPORTED; 469 .close_tx_desc = dwmac1000_close_tx_desc,
686 mac->hw.link.port = GMAC_CONTROL_PS; 470 .get_tx_ls = dwmac1000_get_tx_ls,
687 mac->hw.link.duplex = GMAC_CONTROL_DM; 471 .set_tx_owner = dwmac1000_set_tx_owner,
688 mac->hw.link.speed = GMAC_CONTROL_FES; 472 .set_rx_owner = dwmac1000_set_rx_owner,
689 mac->hw.mii.addr = GMAC_MII_ADDR; 473 .get_rx_frame_len = dwmac1000_get_rx_frame_len,
690 mac->hw.mii.data = GMAC_MII_DATA; 474};
691
692 return mac;
693}
diff --git a/drivers/net/stmmac/dwmac_dma.h b/drivers/net/stmmac/dwmac_dma.h
new file mode 100644
index 000000000000..de848d9f6060
--- /dev/null
+++ b/drivers/net/stmmac/dwmac_dma.h
@@ -0,0 +1,107 @@
1/*******************************************************************************
2 DWMAC DMA Header file.
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25/* DMA CRS Control and Status Register Mapping */
26#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
27#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
28#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
29#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
30#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
31#define DMA_STATUS 0x00001014 /* Status Register */
32#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
33#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
34#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
35#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
36#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
37
38/* DMA Control register defines */
39#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
40#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
41
42/* DMA Normal interrupt */
43#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
44#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
45#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
46#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
47#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
48
49#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
50 DMA_INTR_ENA_TIE)
51
52/* DMA Abnormal interrupt */
53#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
54#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
55#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
56#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
57#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
58#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
59#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
60#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
61#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
62#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
63
64#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
65 DMA_INTR_ENA_UNE)
66
67/* DMA default interrupt mask */
68#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
69
70/* DMA Status register defines */
71#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
72#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
73#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
74#define DMA_STATUS_GMI 0x08000000
75#define DMA_STATUS_GLI 0x04000000
76#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
77#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
78#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
79#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
80#define DMA_STATUS_TS_SHIFT 20
81#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
82#define DMA_STATUS_RS_SHIFT 17
83#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
84#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
85#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
86#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
87#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
88#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
89#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
90#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
91#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
92#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
93#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
94#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
95#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
96#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
97#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
98
99extern void dwmac_enable_dma_transmission(unsigned long ioaddr);
100extern void dwmac_enable_dma_irq(unsigned long ioaddr);
101extern void dwmac_disable_dma_irq(unsigned long ioaddr);
102extern void dwmac_dma_start_tx(unsigned long ioaddr);
103extern void dwmac_dma_stop_tx(unsigned long ioaddr);
104extern void dwmac_dma_start_rx(unsigned long ioaddr);
105extern void dwmac_dma_stop_rx(unsigned long ioaddr);
106extern int dwmac_dma_interrupt(unsigned long ioaddr,
107 struct stmmac_extra_stats *x);
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
new file mode 100644
index 000000000000..d4adb1eaa447
--- /dev/null
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -0,0 +1,263 @@
1/*******************************************************************************
2 Copyright (C) 2007-2009 STMicroelectronics Ltd
3
4 This program is free software; you can redistribute it and/or modify it
5 under the terms and conditions of the GNU General Public License,
6 version 2, as published by the Free Software Foundation.
7
8 This program is distributed in the hope it will be useful, but WITHOUT
9 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 more details.
12
13 You should have received a copy of the GNU General Public License along with
14 this program; if not, write to the Free Software Foundation, Inc.,
15 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16
17 The full GNU General Public License is included in this distribution in
18 the file called "COPYING".
19
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/
22
23#include <linux/io.h>
24#include "common.h"
25#include "dwmac_dma.h"
26
27#undef DWMAC_DMA_DEBUG
28#ifdef DWMAC_DMA_DEBUG
29#define DBG(fmt, args...) printk(fmt, ## args)
30#else
31#define DBG(fmt, args...) do { } while (0)
32#endif
33
34/* CSR1 enables the transmit DMA to check for new descriptor */
35void dwmac_enable_dma_transmission(unsigned long ioaddr)
36{
37 writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
38}
39
40void dwmac_enable_dma_irq(unsigned long ioaddr)
41{
42 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
43}
44
45void dwmac_disable_dma_irq(unsigned long ioaddr)
46{
47 writel(0, ioaddr + DMA_INTR_ENA);
48}
49
50void dwmac_dma_start_tx(unsigned long ioaddr)
51{
52 u32 value = readl(ioaddr + DMA_CONTROL);
53 value |= DMA_CONTROL_ST;
54 writel(value, ioaddr + DMA_CONTROL);
55 return;
56}
57
58void dwmac_dma_stop_tx(unsigned long ioaddr)
59{
60 u32 value = readl(ioaddr + DMA_CONTROL);
61 value &= ~DMA_CONTROL_ST;
62 writel(value, ioaddr + DMA_CONTROL);
63 return;
64}
65
66void dwmac_dma_start_rx(unsigned long ioaddr)
67{
68 u32 value = readl(ioaddr + DMA_CONTROL);
69 value |= DMA_CONTROL_SR;
70 writel(value, ioaddr + DMA_CONTROL);
71
72 return;
73}
74
75void dwmac_dma_stop_rx(unsigned long ioaddr)
76{
77 u32 value = readl(ioaddr + DMA_CONTROL);
78 value &= ~DMA_CONTROL_SR;
79 writel(value, ioaddr + DMA_CONTROL);
80
81 return;
82}
83
84#ifdef DWMAC_DMA_DEBUG
85static void show_tx_process_state(unsigned int status)
86{
87 unsigned int state;
88 state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
89
90 switch (state) {
91 case 0:
92 pr_info("- TX (Stopped): Reset or Stop command\n");
93 break;
94 case 1:
95 pr_info("- TX (Running):Fetching the Tx desc\n");
96 break;
97 case 2:
98 pr_info("- TX (Running): Waiting for end of tx\n");
99 break;
100 case 3:
101 pr_info("- TX (Running): Reading the data "
102 "and queuing the data into the Tx buf\n");
103 break;
104 case 6:
105 pr_info("- TX (Suspended): Tx Buff Underflow "
106 "or an unavailable Transmit descriptor\n");
107 break;
108 case 7:
109 pr_info("- TX (Running): Closing Tx descriptor\n");
110 break;
111 default:
112 break;
113 }
114 return;
115}
116
117static void show_rx_process_state(unsigned int status)
118{
119 unsigned int state;
120 state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
121
122 switch (state) {
123 case 0:
124 pr_info("- RX (Stopped): Reset or Stop command\n");
125 break;
126 case 1:
127 pr_info("- RX (Running): Fetching the Rx desc\n");
128 break;
129 case 2:
130 pr_info("- RX (Running):Checking for end of pkt\n");
131 break;
132 case 3:
133 pr_info("- RX (Running): Waiting for Rx pkt\n");
134 break;
135 case 4:
136 pr_info("- RX (Suspended): Unavailable Rx buf\n");
137 break;
138 case 5:
139 pr_info("- RX (Running): Closing Rx descriptor\n");
140 break;
141 case 6:
142 pr_info("- RX(Running): Flushing the current frame"
143 " from the Rx buf\n");
144 break;
145 case 7:
146 pr_info("- RX (Running): Queuing the Rx frame"
147 " from the Rx buf into memory\n");
148 break;
149 default:
150 break;
151 }
152 return;
153}
154#endif
155
156int dwmac_dma_interrupt(unsigned long ioaddr,
157 struct stmmac_extra_stats *x)
158{
159 int ret = 0;
160 /* read the status register (CSR5) */
161 u32 intr_status = readl(ioaddr + DMA_STATUS);
162
163 DBG(INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
164#ifdef DWMAC_DMA_DEBUG
165 /* It displays the DMA process states (CSR5 register) */
166 show_tx_process_state(intr_status);
167 show_rx_process_state(intr_status);
168#endif
169 /* ABNORMAL interrupts */
170 if (unlikely(intr_status & DMA_STATUS_AIS)) {
171 DBG(INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
172 if (unlikely(intr_status & DMA_STATUS_UNF)) {
173 DBG(INFO, "transmit underflow\n");
174 ret = tx_hard_error_bump_tc;
175 x->tx_undeflow_irq++;
176 }
177 if (unlikely(intr_status & DMA_STATUS_TJT)) {
178 DBG(INFO, "transmit jabber\n");
179 x->tx_jabber_irq++;
180 }
181 if (unlikely(intr_status & DMA_STATUS_OVF)) {
182 DBG(INFO, "recv overflow\n");
183 x->rx_overflow_irq++;
184 }
185 if (unlikely(intr_status & DMA_STATUS_RU)) {
186 DBG(INFO, "receive buffer unavailable\n");
187 x->rx_buf_unav_irq++;
188 }
189 if (unlikely(intr_status & DMA_STATUS_RPS)) {
190 DBG(INFO, "receive process stopped\n");
191 x->rx_process_stopped_irq++;
192 }
193 if (unlikely(intr_status & DMA_STATUS_RWT)) {
194 DBG(INFO, "receive watchdog\n");
195 x->rx_watchdog_irq++;
196 }
197 if (unlikely(intr_status & DMA_STATUS_ETI)) {
198 DBG(INFO, "transmit early interrupt\n");
199 x->tx_early_irq++;
200 }
201 if (unlikely(intr_status & DMA_STATUS_TPS)) {
202 DBG(INFO, "transmit process stopped\n");
203 x->tx_process_stopped_irq++;
204 ret = tx_hard_error;
205 }
206 if (unlikely(intr_status & DMA_STATUS_FBI)) {
207 DBG(INFO, "fatal bus error\n");
208 x->fatal_bus_error_irq++;
209 ret = tx_hard_error;
210 }
211 }
212 /* TX/RX NORMAL interrupts */
213 if (intr_status & DMA_STATUS_NIS) {
214 x->normal_irq_n++;
215 if (likely((intr_status & DMA_STATUS_RI) ||
216 (intr_status & (DMA_STATUS_TI))))
217 ret = handle_tx_rx;
218 }
219 /* Optional hardware blocks, interrupts should be disabled */
220 if (unlikely(intr_status &
221 (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
222 pr_info("%s: unexpected status %08x\n", __func__, intr_status);
223 /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
224 writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
225
226 DBG(INFO, "\n\n");
227 return ret;
228}
229
230
231void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
232 unsigned int high, unsigned int low)
233{
234 unsigned long data;
235
236 data = (addr[5] << 8) | addr[4];
237 writel(data, ioaddr + high);
238 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
239 writel(data, ioaddr + low);
240
241 return;
242}
243
244void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
245 unsigned int high, unsigned int low)
246{
247 unsigned int hi_addr, lo_addr;
248
249 /* Read the MAC address from the hardware */
250 hi_addr = readl(ioaddr + high);
251 lo_addr = readl(ioaddr + low);
252
253 /* Extract the MAC address from the high and low words */
254 addr[0] = lo_addr & 0xff;
255 addr[1] = (lo_addr >> 8) & 0xff;
256 addr[2] = (lo_addr >> 16) & 0xff;
257 addr[3] = (lo_addr >> 24) & 0xff;
258 addr[4] = hi_addr & 0xff;
259 addr[5] = (hi_addr >> 8) & 0xff;
260
261 return;
262}
263
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index 6d2eae3040e5..ba35e6943cf4 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,7 +20,8 @@
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/ 21*******************************************************************************/
22 22
23#define DRV_MODULE_VERSION "Oct_09" 23#define DRV_MODULE_VERSION "Jan_2010"
24#include <linux/stmmac.h>
24 25
25#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 26#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
26#define STMMAC_VLAN_TAG_USED 27#define STMMAC_VLAN_TAG_USED
@@ -57,7 +58,7 @@ struct stmmac_priv {
57 int rx_csum; 58 int rx_csum;
58 unsigned int dma_buf_sz; 59 unsigned int dma_buf_sz;
59 struct device *device; 60 struct device *device;
60 struct mac_device_info *mac_type; 61 struct mac_device_info *hw;
61 62
62 struct stmmac_extra_stats xstats; 63 struct stmmac_extra_stats xstats;
63 struct napi_struct napi; 64 struct napi_struct napi;
@@ -69,6 +70,7 @@ struct stmmac_priv {
69 int phy_mask; 70 int phy_mask;
70 int (*phy_reset) (void *priv); 71 int (*phy_reset) (void *priv);
71 void (*fix_mac_speed) (void *priv, unsigned int speed); 72 void (*fix_mac_speed) (void *priv, unsigned int speed);
73 void (*bus_setup)(unsigned long ioaddr);
72 void *bsp_priv; 74 void *bsp_priv;
73 75
74 int phy_irq; 76 int phy_irq;
@@ -93,6 +95,28 @@ struct stmmac_priv {
93#endif 95#endif
94}; 96};
95 97
98#ifdef CONFIG_STM_DRIVERS
99#include <linux/stm/pad.h>
100static inline int stmmac_claim_resource(struct platform_device *pdev)
101{
102 int ret = 0;
103 struct plat_stmmacenet_data *plat_dat = pdev->dev.platform_data;
104
105 /* Pad routing setup */
106 if (IS_ERR(devm_stm_pad_claim(&pdev->dev, plat_dat->pad_config,
107 dev_name(&pdev->dev)))) {
108 printk(KERN_ERR "%s: Failed to request pads!\n", __func__);
109 ret = -ENODEV;
110 }
111 return ret;
112}
113#else
114static inline int stmmac_claim_resource(struct platform_device *pdev)
115{
116 return 0;
117}
118#endif
119
96extern int stmmac_mdio_unregister(struct net_device *ndev); 120extern int stmmac_mdio_unregister(struct net_device *ndev);
97extern int stmmac_mdio_register(struct net_device *ndev); 121extern int stmmac_mdio_register(struct net_device *ndev);
98extern void stmmac_set_ethtool_ops(struct net_device *netdev); 122extern void stmmac_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index 694ebe6a0758..c021eaa3ca69 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -28,6 +28,7 @@
28#include <linux/phy.h> 28#include <linux/phy.h>
29 29
30#include "stmmac.h" 30#include "stmmac.h"
31#include "dwmac_dma.h"
31 32
32#define REG_SPACE_SIZE 0x1054 33#define REG_SPACE_SIZE 0x1054
33#define MAC100_ETHTOOL_NAME "st_mac100" 34#define MAC100_ETHTOOL_NAME "st_mac100"
@@ -61,7 +62,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
61 STMMAC_STAT(rx_toolong), 62 STMMAC_STAT(rx_toolong),
62 STMMAC_STAT(rx_collision), 63 STMMAC_STAT(rx_collision),
63 STMMAC_STAT(rx_crc), 64 STMMAC_STAT(rx_crc),
64 STMMAC_STAT(rx_lenght), 65 STMMAC_STAT(rx_length),
65 STMMAC_STAT(rx_mii), 66 STMMAC_STAT(rx_mii),
66 STMMAC_STAT(rx_multicast), 67 STMMAC_STAT(rx_multicast),
67 STMMAC_STAT(rx_gmac_overflow), 68 STMMAC_STAT(rx_gmac_overflow),
@@ -268,8 +269,8 @@ stmmac_set_pauseparam(struct net_device *netdev,
268 } 269 }
269 } else { 270 } else {
270 unsigned long ioaddr = netdev->base_addr; 271 unsigned long ioaddr = netdev->base_addr;
271 priv->mac_type->ops->flow_ctrl(ioaddr, phy->duplex, 272 priv->hw->mac->flow_ctrl(ioaddr, phy->duplex,
272 priv->flow_ctrl, priv->pause); 273 priv->flow_ctrl, priv->pause);
273 } 274 }
274 spin_unlock(&priv->lock); 275 spin_unlock(&priv->lock);
275 return ret; 276 return ret;
@@ -283,8 +284,8 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
283 int i; 284 int i;
284 285
285 /* Update HW stats if supported */ 286 /* Update HW stats if supported */
286 priv->mac_type->ops->dma_diagnostic_fr(&dev->stats, &priv->xstats, 287 priv->hw->dma->dma_diagnostic_fr(&dev->stats, (void *) &priv->xstats,
287 ioaddr); 288 ioaddr);
288 289
289 for (i = 0; i < STMMAC_STATS_LEN; i++) { 290 for (i = 0; i < STMMAC_STATS_LEN; i++) {
290 char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; 291 char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 508fba8fa07f..a6733612d64a 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -32,7 +32,6 @@
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/interrupt.h> 34#include <linux/interrupt.h>
35#include <linux/netdevice.h>
36#include <linux/etherdevice.h> 35#include <linux/etherdevice.h>
37#include <linux/platform_device.h> 36#include <linux/platform_device.h>
38#include <linux/ip.h> 37#include <linux/ip.h>
@@ -45,7 +44,6 @@
45#include <linux/phy.h> 44#include <linux/phy.h>
46#include <linux/if_vlan.h> 45#include <linux/if_vlan.h>
47#include <linux/dma-mapping.h> 46#include <linux/dma-mapping.h>
48#include <linux/stm/soc.h>
49#include "stmmac.h" 47#include "stmmac.h"
50 48
51#define STMMAC_RESOURCE_NAME "stmmaceth" 49#define STMMAC_RESOURCE_NAME "stmmaceth"
@@ -226,41 +224,38 @@ static void stmmac_adjust_link(struct net_device *dev)
226 if (phydev->duplex != priv->oldduplex) { 224 if (phydev->duplex != priv->oldduplex) {
227 new_state = 1; 225 new_state = 1;
228 if (!(phydev->duplex)) 226 if (!(phydev->duplex))
229 ctrl &= ~priv->mac_type->hw.link.duplex; 227 ctrl &= ~priv->hw->link.duplex;
230 else 228 else
231 ctrl |= priv->mac_type->hw.link.duplex; 229 ctrl |= priv->hw->link.duplex;
232 priv->oldduplex = phydev->duplex; 230 priv->oldduplex = phydev->duplex;
233 } 231 }
234 /* Flow Control operation */ 232 /* Flow Control operation */
235 if (phydev->pause) 233 if (phydev->pause)
236 priv->mac_type->ops->flow_ctrl(ioaddr, phydev->duplex, 234 priv->hw->mac->flow_ctrl(ioaddr, phydev->duplex,
237 fc, pause_time); 235 fc, pause_time);
238 236
239 if (phydev->speed != priv->speed) { 237 if (phydev->speed != priv->speed) {
240 new_state = 1; 238 new_state = 1;
241 switch (phydev->speed) { 239 switch (phydev->speed) {
242 case 1000: 240 case 1000:
243 if (likely(priv->is_gmac)) 241 if (likely(priv->is_gmac))
244 ctrl &= ~priv->mac_type->hw.link.port; 242 ctrl &= ~priv->hw->link.port;
245 break; 243 break;
246 case 100: 244 case 100:
247 case 10: 245 case 10:
248 if (priv->is_gmac) { 246 if (priv->is_gmac) {
249 ctrl |= priv->mac_type->hw.link.port; 247 ctrl |= priv->hw->link.port;
250 if (phydev->speed == SPEED_100) { 248 if (phydev->speed == SPEED_100) {
251 ctrl |= 249 ctrl |= priv->hw->link.speed;
252 priv->mac_type->hw.link.
253 speed;
254 } else { 250 } else {
255 ctrl &= 251 ctrl &= ~(priv->hw->link.speed);
256 ~(priv->mac_type->hw.
257 link.speed);
258 } 252 }
259 } else { 253 } else {
260 ctrl &= ~priv->mac_type->hw.link.port; 254 ctrl &= ~priv->hw->link.port;
261 } 255 }
262 priv->fix_mac_speed(priv->bsp_priv, 256 if (likely(priv->fix_mac_speed))
263 phydev->speed); 257 priv->fix_mac_speed(priv->bsp_priv,
258 phydev->speed);
264 break; 259 break;
265 default: 260 default:
266 if (netif_msg_link(priv)) 261 if (netif_msg_link(priv))
@@ -305,8 +300,8 @@ static int stmmac_init_phy(struct net_device *dev)
305{ 300{
306 struct stmmac_priv *priv = netdev_priv(dev); 301 struct stmmac_priv *priv = netdev_priv(dev);
307 struct phy_device *phydev; 302 struct phy_device *phydev;
308 char phy_id[BUS_ID_SIZE]; /* PHY to connect */ 303 char phy_id[MII_BUS_ID_SIZE + 3];
309 char bus_id[BUS_ID_SIZE]; 304 char bus_id[MII_BUS_ID_SIZE];
310 305
311 priv->oldlink = 0; 306 priv->oldlink = 0;
312 priv->speed = 0; 307 priv->speed = 0;
@@ -318,7 +313,8 @@ static int stmmac_init_phy(struct net_device *dev)
318 } 313 }
319 314
320 snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id); 315 snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
321 snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, bus_id, priv->phy_addr); 316 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
317 priv->phy_addr);
322 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id); 318 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
323 319
324 phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0, 320 phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0,
@@ -508,8 +504,8 @@ static void init_dma_desc_rings(struct net_device *dev)
508 priv->cur_tx = 0; 504 priv->cur_tx = 0;
509 505
510 /* Clear the Rx/Tx descriptors */ 506 /* Clear the Rx/Tx descriptors */
511 priv->mac_type->ops->init_rx_desc(priv->dma_rx, rxsize, dis_ic); 507 priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
512 priv->mac_type->ops->init_tx_desc(priv->dma_tx, txsize); 508 priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);
513 509
514 if (netif_msg_hw(priv)) { 510 if (netif_msg_hw(priv)) {
515 pr_info("RX descriptor ring:\n"); 511 pr_info("RX descriptor ring:\n");
@@ -544,8 +540,8 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
544 struct dma_desc *p = priv->dma_tx + i; 540 struct dma_desc *p = priv->dma_tx + i;
545 if (p->des2) 541 if (p->des2)
546 dma_unmap_single(priv->device, p->des2, 542 dma_unmap_single(priv->device, p->des2,
547 priv->mac_type->ops->get_tx_len(p), 543 priv->hw->desc->get_tx_len(p),
548 DMA_TO_DEVICE); 544 DMA_TO_DEVICE);
549 dev_kfree_skb_any(priv->tx_skbuff[i]); 545 dev_kfree_skb_any(priv->tx_skbuff[i]);
550 priv->tx_skbuff[i] = NULL; 546 priv->tx_skbuff[i] = NULL;
551 } 547 }
@@ -575,50 +571,6 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
575} 571}
576 572
577/** 573/**
578 * stmmac_dma_start_tx
579 * @ioaddr: device I/O address
580 * Description: this function starts the DMA tx process.
581 */
582static void stmmac_dma_start_tx(unsigned long ioaddr)
583{
584 u32 value = readl(ioaddr + DMA_CONTROL);
585 value |= DMA_CONTROL_ST;
586 writel(value, ioaddr + DMA_CONTROL);
587 return;
588}
589
590static void stmmac_dma_stop_tx(unsigned long ioaddr)
591{
592 u32 value = readl(ioaddr + DMA_CONTROL);
593 value &= ~DMA_CONTROL_ST;
594 writel(value, ioaddr + DMA_CONTROL);
595 return;
596}
597
598/**
599 * stmmac_dma_start_rx
600 * @ioaddr: device I/O address
601 * Description: this function starts the DMA rx process.
602 */
603static void stmmac_dma_start_rx(unsigned long ioaddr)
604{
605 u32 value = readl(ioaddr + DMA_CONTROL);
606 value |= DMA_CONTROL_SR;
607 writel(value, ioaddr + DMA_CONTROL);
608
609 return;
610}
611
612static void stmmac_dma_stop_rx(unsigned long ioaddr)
613{
614 u32 value = readl(ioaddr + DMA_CONTROL);
615 value &= ~DMA_CONTROL_SR;
616 writel(value, ioaddr + DMA_CONTROL);
617
618 return;
619}
620
621/**
622 * stmmac_dma_operation_mode - HW DMA operation mode 574 * stmmac_dma_operation_mode - HW DMA operation mode
623 * @priv : pointer to the private device structure. 575 * @priv : pointer to the private device structure.
624 * Description: it sets the DMA operation mode: tx/rx DMA thresholds 576 * Description: it sets the DMA operation mode: tx/rx DMA thresholds
@@ -629,18 +581,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
629{ 581{
630 if (!priv->is_gmac) { 582 if (!priv->is_gmac) {
631 /* MAC 10/100 */ 583 /* MAC 10/100 */
632 priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 0); 584 priv->hw->dma->dma_mode(priv->dev->base_addr, tc, 0);
633 priv->tx_coe = NO_HW_CSUM; 585 priv->tx_coe = NO_HW_CSUM;
634 } else { 586 } else {
635 if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) { 587 if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) {
636 priv->mac_type->ops->dma_mode(priv->dev->base_addr, 588 priv->hw->dma->dma_mode(priv->dev->base_addr,
637 SF_DMA_MODE, SF_DMA_MODE); 589 SF_DMA_MODE, SF_DMA_MODE);
638 tc = SF_DMA_MODE; 590 tc = SF_DMA_MODE;
639 priv->tx_coe = HW_CSUM; 591 priv->tx_coe = HW_CSUM;
640 } else { 592 } else {
641 /* Checksum computation is performed in software. */ 593 /* Checksum computation is performed in software. */
642 priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 594 priv->hw->dma->dma_mode(priv->dev->base_addr, tc,
643 SF_DMA_MODE); 595 SF_DMA_MODE);
644 priv->tx_coe = NO_HW_CSUM; 596 priv->tx_coe = NO_HW_CSUM;
645 } 597 }
646 } 598 }
@@ -649,88 +601,6 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
649 return; 601 return;
650} 602}
651 603
652#ifdef STMMAC_DEBUG
653/**
654 * show_tx_process_state
655 * @status: tx descriptor status field
656 * Description: it shows the Transmit Process State for CSR5[22:20]
657 */
658static void show_tx_process_state(unsigned int status)
659{
660 unsigned int state;
661 state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
662
663 switch (state) {
664 case 0:
665 pr_info("- TX (Stopped): Reset or Stop command\n");
666 break;
667 case 1:
668 pr_info("- TX (Running):Fetching the Tx desc\n");
669 break;
670 case 2:
671 pr_info("- TX (Running): Waiting for end of tx\n");
672 break;
673 case 3:
674 pr_info("- TX (Running): Reading the data "
675 "and queuing the data into the Tx buf\n");
676 break;
677 case 6:
678 pr_info("- TX (Suspended): Tx Buff Underflow "
679 "or an unavailable Transmit descriptor\n");
680 break;
681 case 7:
682 pr_info("- TX (Running): Closing Tx descriptor\n");
683 break;
684 default:
685 break;
686 }
687 return;
688}
689
690/**
691 * show_rx_process_state
692 * @status: rx descriptor status field
693 * Description: it shows the Receive Process State for CSR5[19:17]
694 */
695static void show_rx_process_state(unsigned int status)
696{
697 unsigned int state;
698 state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
699
700 switch (state) {
701 case 0:
702 pr_info("- RX (Stopped): Reset or Stop command\n");
703 break;
704 case 1:
705 pr_info("- RX (Running): Fetching the Rx desc\n");
706 break;
707 case 2:
708 pr_info("- RX (Running):Checking for end of pkt\n");
709 break;
710 case 3:
711 pr_info("- RX (Running): Waiting for Rx pkt\n");
712 break;
713 case 4:
714 pr_info("- RX (Suspended): Unavailable Rx buf\n");
715 break;
716 case 5:
717 pr_info("- RX (Running): Closing Rx descriptor\n");
718 break;
719 case 6:
720 pr_info("- RX(Running): Flushing the current frame"
721 " from the Rx buf\n");
722 break;
723 case 7:
724 pr_info("- RX (Running): Queuing the Rx frame"
725 " from the Rx buf into memory\n");
726 break;
727 default:
728 break;
729 }
730 return;
731}
732#endif
733
734/** 604/**
735 * stmmac_tx: 605 * stmmac_tx:
736 * @priv: private driver structure 606 * @priv: private driver structure
@@ -748,16 +618,16 @@ static void stmmac_tx(struct stmmac_priv *priv)
748 struct dma_desc *p = priv->dma_tx + entry; 618 struct dma_desc *p = priv->dma_tx + entry;
749 619
750 /* Check if the descriptor is owned by the DMA. */ 620 /* Check if the descriptor is owned by the DMA. */
751 if (priv->mac_type->ops->get_tx_owner(p)) 621 if (priv->hw->desc->get_tx_owner(p))
752 break; 622 break;
753 623
754 /* Verify tx error by looking at the last segment */ 624 /* Verify tx error by looking at the last segment */
755 last = priv->mac_type->ops->get_tx_ls(p); 625 last = priv->hw->desc->get_tx_ls(p);
756 if (likely(last)) { 626 if (likely(last)) {
757 int tx_error = 627 int tx_error =
758 priv->mac_type->ops->tx_status(&priv->dev->stats, 628 priv->hw->desc->tx_status(&priv->dev->stats,
759 &priv->xstats, 629 &priv->xstats, p,
760 p, ioaddr); 630 ioaddr);
761 if (likely(tx_error == 0)) { 631 if (likely(tx_error == 0)) {
762 priv->dev->stats.tx_packets++; 632 priv->dev->stats.tx_packets++;
763 priv->xstats.tx_pkt_n++; 633 priv->xstats.tx_pkt_n++;
@@ -769,7 +639,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
769 639
770 if (likely(p->des2)) 640 if (likely(p->des2))
771 dma_unmap_single(priv->device, p->des2, 641 dma_unmap_single(priv->device, p->des2,
772 priv->mac_type->ops->get_tx_len(p), 642 priv->hw->desc->get_tx_len(p),
773 DMA_TO_DEVICE); 643 DMA_TO_DEVICE);
774 if (unlikely(p->des3)) 644 if (unlikely(p->des3))
775 p->des3 = 0; 645 p->des3 = 0;
@@ -790,7 +660,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
790 priv->tx_skbuff[entry] = NULL; 660 priv->tx_skbuff[entry] = NULL;
791 } 661 }
792 662
793 priv->mac_type->ops->release_tx_desc(p); 663 priv->hw->desc->release_tx_desc(p);
794 664
795 entry = (++priv->dirty_tx) % txsize; 665 entry = (++priv->dirty_tx) % txsize;
796 } 666 }
@@ -814,7 +684,7 @@ static inline void stmmac_enable_irq(struct stmmac_priv *priv)
814 priv->tm->timer_start(tmrate); 684 priv->tm->timer_start(tmrate);
815 else 685 else
816#endif 686#endif
817 writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA); 687 priv->hw->dma->enable_dma_irq(priv->dev->base_addr);
818} 688}
819 689
820static inline void stmmac_disable_irq(struct stmmac_priv *priv) 690static inline void stmmac_disable_irq(struct stmmac_priv *priv)
@@ -824,7 +694,7 @@ static inline void stmmac_disable_irq(struct stmmac_priv *priv)
824 priv->tm->timer_stop(); 694 priv->tm->timer_stop();
825 else 695 else
826#endif 696#endif
827 writel(0, priv->dev->base_addr + DMA_INTR_ENA); 697 priv->hw->dma->disable_dma_irq(priv->dev->base_addr);
828} 698}
829 699
830static int stmmac_has_work(struct stmmac_priv *priv) 700static int stmmac_has_work(struct stmmac_priv *priv)
@@ -832,7 +702,7 @@ static int stmmac_has_work(struct stmmac_priv *priv)
832 unsigned int has_work = 0; 702 unsigned int has_work = 0;
833 int rxret, tx_work = 0; 703 int rxret, tx_work = 0;
834 704
835 rxret = priv->mac_type->ops->get_rx_owner(priv->dma_rx + 705 rxret = priv->hw->desc->get_rx_owner(priv->dma_rx +
836 (priv->cur_rx % priv->dma_rx_size)); 706 (priv->cur_rx % priv->dma_rx_size));
837 707
838 if (priv->dirty_tx != priv->cur_tx) 708 if (priv->dirty_tx != priv->cur_tx)
@@ -883,12 +753,12 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
883{ 753{
884 netif_stop_queue(priv->dev); 754 netif_stop_queue(priv->dev);
885 755
886 stmmac_dma_stop_tx(priv->dev->base_addr); 756 priv->hw->dma->stop_tx(priv->dev->base_addr);
887 dma_free_tx_skbufs(priv); 757 dma_free_tx_skbufs(priv);
888 priv->mac_type->ops->init_tx_desc(priv->dma_tx, priv->dma_tx_size); 758 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
889 priv->dirty_tx = 0; 759 priv->dirty_tx = 0;
890 priv->cur_tx = 0; 760 priv->cur_tx = 0;
891 stmmac_dma_start_tx(priv->dev->base_addr); 761 priv->hw->dma->start_tx(priv->dev->base_addr);
892 762
893 priv->dev->stats.tx_errors++; 763 priv->dev->stats.tx_errors++;
894 netif_wake_queue(priv->dev); 764 netif_wake_queue(priv->dev);
@@ -896,95 +766,27 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
896 return; 766 return;
897} 767}
898 768
899/**
900 * stmmac_dma_interrupt - Interrupt handler for the driver
901 * @dev: net device structure
902 * Description: Interrupt handler for the driver (DMA).
903 */
904static void stmmac_dma_interrupt(struct net_device *dev)
905{
906 unsigned long ioaddr = dev->base_addr;
907 struct stmmac_priv *priv = netdev_priv(dev);
908 /* read the status register (CSR5) */
909 u32 intr_status = readl(ioaddr + DMA_STATUS);
910
911 DBG(intr, INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
912 769
913#ifdef STMMAC_DEBUG 770static void stmmac_dma_interrupt(struct stmmac_priv *priv)
914 /* It displays the DMA transmit process state (CSR5 register) */ 771{
915 if (netif_msg_tx_done(priv)) 772 unsigned long ioaddr = priv->dev->base_addr;
916 show_tx_process_state(intr_status); 773 int status;
917 if (netif_msg_rx_status(priv)) 774
918 show_rx_process_state(intr_status); 775 status = priv->hw->dma->dma_interrupt(priv->dev->base_addr,
919#endif 776 &priv->xstats);
920 /* ABNORMAL interrupts */ 777 if (likely(status == handle_tx_rx))
921 if (unlikely(intr_status & DMA_STATUS_AIS)) { 778 _stmmac_schedule(priv);
922 DBG(intr, INFO, "CSR5[15] DMA ABNORMAL IRQ: "); 779
923 if (unlikely(intr_status & DMA_STATUS_UNF)) { 780 else if (unlikely(status == tx_hard_error_bump_tc)) {
924 DBG(intr, INFO, "transmit underflow\n"); 781 /* Try to bump up the dma threshold on this failure */
925 if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) { 782 if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
926 /* Try to bump up the threshold */ 783 tc += 64;
927 tc += 64; 784 priv->hw->dma->dma_mode(ioaddr, tc, SF_DMA_MODE);
928 priv->mac_type->ops->dma_mode(ioaddr, tc, 785 priv->xstats.threshold = tc;
929 SF_DMA_MODE);
930 priv->xstats.threshold = tc;
931 }
932 stmmac_tx_err(priv);
933 priv->xstats.tx_undeflow_irq++;
934 }
935 if (unlikely(intr_status & DMA_STATUS_TJT)) {
936 DBG(intr, INFO, "transmit jabber\n");
937 priv->xstats.tx_jabber_irq++;
938 }
939 if (unlikely(intr_status & DMA_STATUS_OVF)) {
940 DBG(intr, INFO, "recv overflow\n");
941 priv->xstats.rx_overflow_irq++;
942 }
943 if (unlikely(intr_status & DMA_STATUS_RU)) {
944 DBG(intr, INFO, "receive buffer unavailable\n");
945 priv->xstats.rx_buf_unav_irq++;
946 }
947 if (unlikely(intr_status & DMA_STATUS_RPS)) {
948 DBG(intr, INFO, "receive process stopped\n");
949 priv->xstats.rx_process_stopped_irq++;
950 }
951 if (unlikely(intr_status & DMA_STATUS_RWT)) {
952 DBG(intr, INFO, "receive watchdog\n");
953 priv->xstats.rx_watchdog_irq++;
954 }
955 if (unlikely(intr_status & DMA_STATUS_ETI)) {
956 DBG(intr, INFO, "transmit early interrupt\n");
957 priv->xstats.tx_early_irq++;
958 }
959 if (unlikely(intr_status & DMA_STATUS_TPS)) {
960 DBG(intr, INFO, "transmit process stopped\n");
961 priv->xstats.tx_process_stopped_irq++;
962 stmmac_tx_err(priv);
963 }
964 if (unlikely(intr_status & DMA_STATUS_FBI)) {
965 DBG(intr, INFO, "fatal bus error\n");
966 priv->xstats.fatal_bus_error_irq++;
967 stmmac_tx_err(priv);
968 } 786 }
969 } 787 stmmac_tx_err(priv);
970 788 } else if (unlikely(status == tx_hard_error))
971 /* TX/RX NORMAL interrupts */ 789 stmmac_tx_err(priv);
972 if (intr_status & DMA_STATUS_NIS) {
973 priv->xstats.normal_irq_n++;
974 if (likely((intr_status & DMA_STATUS_RI) ||
975 (intr_status & (DMA_STATUS_TI))))
976 _stmmac_schedule(priv);
977 }
978
979 /* Optional hardware blocks, interrupts should be disabled */
980 if (unlikely(intr_status &
981 (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
982 pr_info("%s: unexpected status %08x\n", __func__, intr_status);
983
984 /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
985 writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
986
987 DBG(intr, INFO, "\n\n");
988 790
989 return; 791 return;
990} 792}
@@ -1058,17 +860,20 @@ static int stmmac_open(struct net_device *dev)
1058 init_dma_desc_rings(dev); 860 init_dma_desc_rings(dev);
1059 861
1060 /* DMA initialization and SW reset */ 862 /* DMA initialization and SW reset */
1061 if (unlikely(priv->mac_type->ops->dma_init(ioaddr, 863 if (unlikely(priv->hw->dma->init(ioaddr, priv->pbl, priv->dma_tx_phy,
1062 priv->pbl, priv->dma_tx_phy, priv->dma_rx_phy) < 0)) { 864 priv->dma_rx_phy) < 0)) {
1063 865
1064 pr_err("%s: DMA initialization failed\n", __func__); 866 pr_err("%s: DMA initialization failed\n", __func__);
1065 return -1; 867 return -1;
1066 } 868 }
1067 869
1068 /* Copy the MAC addr into the HW */ 870 /* Copy the MAC addr into the HW */
1069 priv->mac_type->ops->set_umac_addr(ioaddr, dev->dev_addr, 0); 871 priv->hw->mac->set_umac_addr(ioaddr, dev->dev_addr, 0);
872 /* If required, perform hw setup of the bus. */
873 if (priv->bus_setup)
874 priv->bus_setup(ioaddr);
1070 /* Initialize the MAC Core */ 875 /* Initialize the MAC Core */
1071 priv->mac_type->ops->core_init(ioaddr); 876 priv->hw->mac->core_init(ioaddr);
1072 877
1073 priv->shutdown = 0; 878 priv->shutdown = 0;
1074 879
@@ -1089,16 +894,16 @@ static int stmmac_open(struct net_device *dev)
1089 894
1090 /* Start the ball rolling... */ 895 /* Start the ball rolling... */
1091 DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name); 896 DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
1092 stmmac_dma_start_tx(ioaddr); 897 priv->hw->dma->start_tx(ioaddr);
1093 stmmac_dma_start_rx(ioaddr); 898 priv->hw->dma->start_rx(ioaddr);
1094 899
1095#ifdef CONFIG_STMMAC_TIMER 900#ifdef CONFIG_STMMAC_TIMER
1096 priv->tm->timer_start(tmrate); 901 priv->tm->timer_start(tmrate);
1097#endif 902#endif
1098 /* Dump DMA/MAC registers */ 903 /* Dump DMA/MAC registers */
1099 if (netif_msg_hw(priv)) { 904 if (netif_msg_hw(priv)) {
1100 priv->mac_type->ops->dump_mac_regs(ioaddr); 905 priv->hw->mac->dump_regs(ioaddr);
1101 priv->mac_type->ops->dump_dma_regs(ioaddr); 906 priv->hw->dma->dump_regs(ioaddr);
1102 } 907 }
1103 908
1104 if (priv->phydev) 909 if (priv->phydev)
@@ -1142,8 +947,8 @@ static int stmmac_release(struct net_device *dev)
1142 free_irq(dev->irq, dev); 947 free_irq(dev->irq, dev);
1143 948
1144 /* Stop TX/RX DMA and clear the descriptors */ 949 /* Stop TX/RX DMA and clear the descriptors */
1145 stmmac_dma_stop_tx(dev->base_addr); 950 priv->hw->dma->stop_tx(dev->base_addr);
1146 stmmac_dma_stop_rx(dev->base_addr); 951 priv->hw->dma->stop_rx(dev->base_addr);
1147 952
1148 /* Release and free the Rx/Tx resources */ 953 /* Release and free the Rx/Tx resources */
1149 free_dma_desc_resources(priv); 954 free_dma_desc_resources(priv);
@@ -1214,8 +1019,8 @@ static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
1214 desc->des2 = dma_map_single(priv->device, skb->data, 1019 desc->des2 = dma_map_single(priv->device, skb->data,
1215 BUF_SIZE_8KiB, DMA_TO_DEVICE); 1020 BUF_SIZE_8KiB, DMA_TO_DEVICE);
1216 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 1021 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
1217 priv->mac_type->ops->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB, 1022 priv->hw->desc->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
1218 csum_insertion); 1023 csum_insertion);
1219 1024
1220 entry = (++priv->cur_tx) % txsize; 1025 entry = (++priv->cur_tx) % txsize;
1221 desc = priv->dma_tx + entry; 1026 desc = priv->dma_tx + entry;
@@ -1224,16 +1029,16 @@ static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
1224 skb->data + BUF_SIZE_8KiB, 1029 skb->data + BUF_SIZE_8KiB,
1225 buf2_size, DMA_TO_DEVICE); 1030 buf2_size, DMA_TO_DEVICE);
1226 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 1031 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
1227 priv->mac_type->ops->prepare_tx_desc(desc, 0, 1032 priv->hw->desc->prepare_tx_desc(desc, 0, buf2_size,
1228 buf2_size, csum_insertion); 1033 csum_insertion);
1229 priv->mac_type->ops->set_tx_owner(desc); 1034 priv->hw->desc->set_tx_owner(desc);
1230 priv->tx_skbuff[entry] = NULL; 1035 priv->tx_skbuff[entry] = NULL;
1231 } else { 1036 } else {
1232 desc->des2 = dma_map_single(priv->device, skb->data, 1037 desc->des2 = dma_map_single(priv->device, skb->data,
1233 nopaged_len, DMA_TO_DEVICE); 1038 nopaged_len, DMA_TO_DEVICE);
1234 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 1039 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
1235 priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len, 1040 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
1236 csum_insertion); 1041 csum_insertion);
1237 } 1042 }
1238 return entry; 1043 return entry;
1239} 1044}
@@ -1301,8 +1106,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1301 unsigned int nopaged_len = skb_headlen(skb); 1106 unsigned int nopaged_len = skb_headlen(skb);
1302 desc->des2 = dma_map_single(priv->device, skb->data, 1107 desc->des2 = dma_map_single(priv->device, skb->data,
1303 nopaged_len, DMA_TO_DEVICE); 1108 nopaged_len, DMA_TO_DEVICE);
1304 priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len, 1109 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
1305 csum_insertion); 1110 csum_insertion);
1306 } 1111 }
1307 1112
1308 for (i = 0; i < nfrags; i++) { 1113 for (i = 0; i < nfrags; i++) {
@@ -1317,21 +1122,20 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1317 frag->page_offset, 1122 frag->page_offset,
1318 len, DMA_TO_DEVICE); 1123 len, DMA_TO_DEVICE);
1319 priv->tx_skbuff[entry] = NULL; 1124 priv->tx_skbuff[entry] = NULL;
1320 priv->mac_type->ops->prepare_tx_desc(desc, 0, len, 1125 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
1321 csum_insertion); 1126 priv->hw->desc->set_tx_owner(desc);
1322 priv->mac_type->ops->set_tx_owner(desc);
1323 } 1127 }
1324 1128
1325 /* Interrupt on completition only for the latest segment */ 1129 /* Interrupt on completition only for the latest segment */
1326 priv->mac_type->ops->close_tx_desc(desc); 1130 priv->hw->desc->close_tx_desc(desc);
1327 1131
1328#ifdef CONFIG_STMMAC_TIMER 1132#ifdef CONFIG_STMMAC_TIMER
1329 /* Clean IC while using timer */ 1133 /* Clean IC while using timer */
1330 if (likely(priv->tm->enable)) 1134 if (likely(priv->tm->enable))
1331 priv->mac_type->ops->clear_tx_ic(desc); 1135 priv->hw->desc->clear_tx_ic(desc);
1332#endif 1136#endif
1333 /* To avoid raise condition */ 1137 /* To avoid raise condition */
1334 priv->mac_type->ops->set_tx_owner(first); 1138 priv->hw->desc->set_tx_owner(first);
1335 1139
1336 priv->cur_tx++; 1140 priv->cur_tx++;
1337 1141
@@ -1353,8 +1157,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1353 1157
1354 dev->stats.tx_bytes += skb->len; 1158 dev->stats.tx_bytes += skb->len;
1355 1159
1356 /* CSR1 enables the transmit DMA to check for new descriptor */ 1160 priv->hw->dma->enable_dma_transmission(dev->base_addr);
1357 writel(1, dev->base_addr + DMA_XMT_POLL_DEMAND);
1358 1161
1359 return NETDEV_TX_OK; 1162 return NETDEV_TX_OK;
1360} 1163}
@@ -1391,7 +1194,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1391 } 1194 }
1392 RX_DBG(KERN_INFO "\trefill entry #%d\n", entry); 1195 RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
1393 } 1196 }
1394 priv->mac_type->ops->set_rx_owner(p + entry); 1197 priv->hw->desc->set_rx_owner(p + entry);
1395 } 1198 }
1396 return; 1199 return;
1397} 1200}
@@ -1412,7 +1215,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1412 } 1215 }
1413#endif 1216#endif
1414 count = 0; 1217 count = 0;
1415 while (!priv->mac_type->ops->get_rx_owner(p)) { 1218 while (!priv->hw->desc->get_rx_owner(p)) {
1416 int status; 1219 int status;
1417 1220
1418 if (count >= limit) 1221 if (count >= limit)
@@ -1425,15 +1228,14 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1425 prefetch(p_next); 1228 prefetch(p_next);
1426 1229
1427 /* read the status of the incoming frame */ 1230 /* read the status of the incoming frame */
1428 status = (priv->mac_type->ops->rx_status(&priv->dev->stats, 1231 status = (priv->hw->desc->rx_status(&priv->dev->stats,
1429 &priv->xstats, p)); 1232 &priv->xstats, p));
1430 if (unlikely(status == discard_frame)) 1233 if (unlikely(status == discard_frame))
1431 priv->dev->stats.rx_errors++; 1234 priv->dev->stats.rx_errors++;
1432 else { 1235 else {
1433 struct sk_buff *skb; 1236 struct sk_buff *skb;
1434 /* Length should omit the CRC */ 1237 /* Length should omit the CRC */
1435 int frame_len = 1238 int frame_len = priv->hw->desc->get_rx_frame_len(p) - 4;
1436 priv->mac_type->ops->get_rx_frame_len(p) - 4;
1437 1239
1438#ifdef STMMAC_RX_DEBUG 1240#ifdef STMMAC_RX_DEBUG
1439 if (frame_len > ETH_FRAME_LEN) 1241 if (frame_len > ETH_FRAME_LEN)
@@ -1569,7 +1371,7 @@ static void stmmac_multicast_list(struct net_device *dev)
1569 struct stmmac_priv *priv = netdev_priv(dev); 1371 struct stmmac_priv *priv = netdev_priv(dev);
1570 1372
1571 spin_lock(&priv->lock); 1373 spin_lock(&priv->lock);
1572 priv->mac_type->ops->set_filter(dev); 1374 priv->hw->mac->set_filter(dev);
1573 spin_unlock(&priv->lock); 1375 spin_unlock(&priv->lock);
1574 return; 1376 return;
1575} 1377}
@@ -1623,9 +1425,10 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
1623 if (priv->is_gmac) { 1425 if (priv->is_gmac) {
1624 unsigned long ioaddr = dev->base_addr; 1426 unsigned long ioaddr = dev->base_addr;
1625 /* To handle GMAC own interrupts */ 1427 /* To handle GMAC own interrupts */
1626 priv->mac_type->ops->host_irq_status(ioaddr); 1428 priv->hw->mac->host_irq_status(ioaddr);
1627 } 1429 }
1628 stmmac_dma_interrupt(dev); 1430
1431 stmmac_dma_interrupt(priv);
1629 1432
1630 return IRQ_HANDLED; 1433 return IRQ_HANDLED;
1631} 1434}
@@ -1744,7 +1547,7 @@ static int stmmac_probe(struct net_device *dev)
1744 netif_napi_add(dev, &priv->napi, stmmac_poll, 64); 1547 netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
1745 1548
1746 /* Get the MAC address */ 1549 /* Get the MAC address */
1747 priv->mac_type->ops->get_umac_addr(dev->base_addr, dev->dev_addr, 0); 1550 priv->hw->mac->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
1748 1551
1749 if (!is_valid_ether_addr(dev->dev_addr)) 1552 if (!is_valid_ether_addr(dev->dev_addr))
1750 pr_warning("\tno valid MAC address;" 1553 pr_warning("\tno valid MAC address;"
@@ -1779,16 +1582,16 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1779 struct mac_device_info *device; 1582 struct mac_device_info *device;
1780 1583
1781 if (priv->is_gmac) 1584 if (priv->is_gmac)
1782 device = gmac_setup(ioaddr); 1585 device = dwmac1000_setup(ioaddr);
1783 else 1586 else
1784 device = mac100_setup(ioaddr); 1587 device = dwmac100_setup(ioaddr);
1785 1588
1786 if (!device) 1589 if (!device)
1787 return -ENOMEM; 1590 return -ENOMEM;
1788 1591
1789 priv->mac_type = device; 1592 priv->hw = device;
1790 1593
1791 priv->wolenabled = priv->mac_type->hw.pmt; /* PMT supported */ 1594 priv->wolenabled = priv->hw->pmt; /* PMT supported */
1792 if (priv->wolenabled == PMT_SUPPORTED) 1595 if (priv->wolenabled == PMT_SUPPORTED)
1793 priv->wolopts = WAKE_MAGIC; /* Magic Frame */ 1596 priv->wolopts = WAKE_MAGIC; /* Magic Frame */
1794 1597
@@ -1797,8 +1600,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1797 1600
1798static int stmmacphy_dvr_probe(struct platform_device *pdev) 1601static int stmmacphy_dvr_probe(struct platform_device *pdev)
1799{ 1602{
1800 struct plat_stmmacphy_data *plat_dat; 1603 struct plat_stmmacphy_data *plat_dat = pdev->dev.platform_data;
1801 plat_dat = (struct plat_stmmacphy_data *)((pdev->dev).platform_data);
1802 1604
1803 pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n", 1605 pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n",
1804 plat_dat->bus_id); 1606 plat_dat->bus_id);
@@ -1830,9 +1632,7 @@ static struct platform_driver stmmacphy_driver = {
1830static int stmmac_associate_phy(struct device *dev, void *data) 1632static int stmmac_associate_phy(struct device *dev, void *data)
1831{ 1633{
1832 struct stmmac_priv *priv = (struct stmmac_priv *)data; 1634 struct stmmac_priv *priv = (struct stmmac_priv *)data;
1833 struct plat_stmmacphy_data *plat_dat; 1635 struct plat_stmmacphy_data *plat_dat = dev->platform_data;
1834
1835 plat_dat = (struct plat_stmmacphy_data *)(dev->platform_data);
1836 1636
1837 DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__, 1637 DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__,
1838 plat_dat->bus_id); 1638 plat_dat->bus_id);
@@ -1922,7 +1722,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1922 priv = netdev_priv(ndev); 1722 priv = netdev_priv(ndev);
1923 priv->device = &(pdev->dev); 1723 priv->device = &(pdev->dev);
1924 priv->dev = ndev; 1724 priv->dev = ndev;
1925 plat_dat = (struct plat_stmmacenet_data *)((pdev->dev).platform_data); 1725 plat_dat = pdev->dev.platform_data;
1926 priv->bus_id = plat_dat->bus_id; 1726 priv->bus_id = plat_dat->bus_id;
1927 priv->pbl = plat_dat->pbl; /* TLI */ 1727 priv->pbl = plat_dat->pbl; /* TLI */
1928 priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */ 1728 priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
@@ -1932,6 +1732,11 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1932 /* Set the I/O base addr */ 1732 /* Set the I/O base addr */
1933 ndev->base_addr = (unsigned long)addr; 1733 ndev->base_addr = (unsigned long)addr;
1934 1734
1735 /* Verify embedded resource for the platform */
1736 ret = stmmac_claim_resource(pdev);
1737 if (ret < 0)
1738 goto out;
1739
1935 /* MAC HW revice detection */ 1740 /* MAC HW revice detection */
1936 ret = stmmac_mac_device_setup(ndev); 1741 ret = stmmac_mac_device_setup(ndev);
1937 if (ret < 0) 1742 if (ret < 0)
@@ -1952,6 +1757,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1952 } 1757 }
1953 1758
1954 priv->fix_mac_speed = plat_dat->fix_mac_speed; 1759 priv->fix_mac_speed = plat_dat->fix_mac_speed;
1760 priv->bus_setup = plat_dat->bus_setup;
1955 priv->bsp_priv = plat_dat->bsp_priv; 1761 priv->bsp_priv = plat_dat->bsp_priv;
1956 1762
1957 pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n" 1763 pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
@@ -1986,12 +1792,13 @@ out:
1986static int stmmac_dvr_remove(struct platform_device *pdev) 1792static int stmmac_dvr_remove(struct platform_device *pdev)
1987{ 1793{
1988 struct net_device *ndev = platform_get_drvdata(pdev); 1794 struct net_device *ndev = platform_get_drvdata(pdev);
1795 struct stmmac_priv *priv = netdev_priv(ndev);
1989 struct resource *res; 1796 struct resource *res;
1990 1797
1991 pr_info("%s:\n\tremoving driver", __func__); 1798 pr_info("%s:\n\tremoving driver", __func__);
1992 1799
1993 stmmac_dma_stop_rx(ndev->base_addr); 1800 priv->hw->dma->stop_rx(ndev->base_addr);
1994 stmmac_dma_stop_tx(ndev->base_addr); 1801 priv->hw->dma->stop_tx(ndev->base_addr);
1995 1802
1996 stmmac_mac_disable_rx(ndev->base_addr); 1803 stmmac_mac_disable_rx(ndev->base_addr);
1997 stmmac_mac_disable_tx(ndev->base_addr); 1804 stmmac_mac_disable_tx(ndev->base_addr);
@@ -2038,21 +1845,20 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
2038 napi_disable(&priv->napi); 1845 napi_disable(&priv->napi);
2039 1846
2040 /* Stop TX/RX DMA */ 1847 /* Stop TX/RX DMA */
2041 stmmac_dma_stop_tx(dev->base_addr); 1848 priv->hw->dma->stop_tx(dev->base_addr);
2042 stmmac_dma_stop_rx(dev->base_addr); 1849 priv->hw->dma->stop_rx(dev->base_addr);
2043 /* Clear the Rx/Tx descriptors */ 1850 /* Clear the Rx/Tx descriptors */
2044 priv->mac_type->ops->init_rx_desc(priv->dma_rx, 1851 priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
2045 priv->dma_rx_size, dis_ic); 1852 dis_ic);
2046 priv->mac_type->ops->init_tx_desc(priv->dma_tx, 1853 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
2047 priv->dma_tx_size);
2048 1854
2049 stmmac_mac_disable_tx(dev->base_addr); 1855 stmmac_mac_disable_tx(dev->base_addr);
2050 1856
2051 if (device_may_wakeup(&(pdev->dev))) { 1857 if (device_may_wakeup(&(pdev->dev))) {
2052 /* Enable Power down mode by programming the PMT regs */ 1858 /* Enable Power down mode by programming the PMT regs */
2053 if (priv->wolenabled == PMT_SUPPORTED) 1859 if (priv->wolenabled == PMT_SUPPORTED)
2054 priv->mac_type->ops->pmt(dev->base_addr, 1860 priv->hw->mac->pmt(dev->base_addr,
2055 priv->wolopts); 1861 priv->wolopts);
2056 } else { 1862 } else {
2057 stmmac_mac_disable_rx(dev->base_addr); 1863 stmmac_mac_disable_rx(dev->base_addr);
2058 } 1864 }
@@ -2093,15 +1899,15 @@ static int stmmac_resume(struct platform_device *pdev)
2093 * from another devices (e.g. serial console). */ 1899 * from another devices (e.g. serial console). */
2094 if (device_may_wakeup(&(pdev->dev))) 1900 if (device_may_wakeup(&(pdev->dev)))
2095 if (priv->wolenabled == PMT_SUPPORTED) 1901 if (priv->wolenabled == PMT_SUPPORTED)
2096 priv->mac_type->ops->pmt(dev->base_addr, 0); 1902 priv->hw->mac->pmt(dev->base_addr, 0);
2097 1903
2098 netif_device_attach(dev); 1904 netif_device_attach(dev);
2099 1905
2100 /* Enable the MAC and DMA */ 1906 /* Enable the MAC and DMA */
2101 stmmac_mac_enable_rx(ioaddr); 1907 stmmac_mac_enable_rx(ioaddr);
2102 stmmac_mac_enable_tx(ioaddr); 1908 stmmac_mac_enable_tx(ioaddr);
2103 stmmac_dma_start_tx(ioaddr); 1909 priv->hw->dma->start_tx(ioaddr);
2104 stmmac_dma_start_rx(ioaddr); 1910 priv->hw->dma->start_rx(ioaddr);
2105 1911
2106#ifdef CONFIG_STMMAC_TIMER 1912#ifdef CONFIG_STMMAC_TIMER
2107 priv->tm->timer_start(tmrate); 1913 priv->tm->timer_start(tmrate);
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
index 8498552a22fc..fffe1d037fe6 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -24,7 +24,6 @@
24 Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com> 24 Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
25*******************************************************************************/ 25*******************************************************************************/
26 26
27#include <linux/netdevice.h>
28#include <linux/mii.h> 27#include <linux/mii.h>
29#include <linux/phy.h> 28#include <linux/phy.h>
30 29
@@ -48,8 +47,8 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
48 struct net_device *ndev = bus->priv; 47 struct net_device *ndev = bus->priv;
49 struct stmmac_priv *priv = netdev_priv(ndev); 48 struct stmmac_priv *priv = netdev_priv(ndev);
50 unsigned long ioaddr = ndev->base_addr; 49 unsigned long ioaddr = ndev->base_addr;
51 unsigned int mii_address = priv->mac_type->hw.mii.addr; 50 unsigned int mii_address = priv->hw->mii.addr;
52 unsigned int mii_data = priv->mac_type->hw.mii.data; 51 unsigned int mii_data = priv->hw->mii.data;
53 52
54 int data; 53 int data;
55 u16 regValue = (((phyaddr << 11) & (0x0000F800)) | 54 u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
@@ -80,8 +79,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
80 struct net_device *ndev = bus->priv; 79 struct net_device *ndev = bus->priv;
81 struct stmmac_priv *priv = netdev_priv(ndev); 80 struct stmmac_priv *priv = netdev_priv(ndev);
82 unsigned long ioaddr = ndev->base_addr; 81 unsigned long ioaddr = ndev->base_addr;
83 unsigned int mii_address = priv->mac_type->hw.mii.addr; 82 unsigned int mii_address = priv->hw->mii.addr;
84 unsigned int mii_data = priv->mac_type->hw.mii.data; 83 unsigned int mii_data = priv->hw->mii.data;
85 84
86 u16 value = 85 u16 value =
87 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0))) 86 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
@@ -112,7 +111,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
112 struct net_device *ndev = bus->priv; 111 struct net_device *ndev = bus->priv;
113 struct stmmac_priv *priv = netdev_priv(ndev); 112 struct stmmac_priv *priv = netdev_priv(ndev);
114 unsigned long ioaddr = ndev->base_addr; 113 unsigned long ioaddr = ndev->base_addr;
115 unsigned int mii_address = priv->mac_type->hw.mii.addr; 114 unsigned int mii_address = priv->hw->mii.addr;
116 115
117 if (priv->phy_reset) { 116 if (priv->phy_reset) {
118 pr_debug("stmmac_mdio_reset: calling phy_reset\n"); 117 pr_debug("stmmac_mdio_reset: calling phy_reset\n");
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index b447a8719427..2f6a760e5f21 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -413,8 +413,8 @@ static int init586(struct net_device *dev)
413 volatile struct iasetup_cmd_struct *ias_cmd; 413 volatile struct iasetup_cmd_struct *ias_cmd;
414 volatile struct tdr_cmd_struct *tdr_cmd; 414 volatile struct tdr_cmd_struct *tdr_cmd;
415 volatile struct mcsetup_cmd_struct *mc_cmd; 415 volatile struct mcsetup_cmd_struct *mc_cmd;
416 struct dev_mc_list *dmi=dev->mc_list; 416 struct dev_mc_list *dmi;
417 int num_addrs=dev->mc_count; 417 int num_addrs=netdev_mc_count(dev);
418 418
419 ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct)); 419 ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
420 420
@@ -536,8 +536,10 @@ static int init586(struct net_device *dev)
536 mc_cmd->cmd_link = 0xffff; 536 mc_cmd->cmd_link = 0xffff;
537 mc_cmd->mc_cnt = swab16(num_addrs * 6); 537 mc_cmd->mc_cnt = swab16(num_addrs * 6);
538 538
539 for(i=0;i<num_addrs;i++,dmi=dmi->next) 539 i = 0;
540 memcpy((char *) mc_cmd->mc_list[i], dmi->dmi_addr,6); 540 netdev_for_each_mc_addr(dmi, dev)
541 memcpy((char *) mc_cmd->mc_list[i++],
542 dmi->dmi_addr, ETH_ALEN);
541 543
542 p->scb->cbl_offset = make16(mc_cmd); 544 p->scb->cbl_offset = make16(mc_cmd);
543 p->scb->cmd_cuc = CUC_START; 545 p->scb->cmd_cuc = CUC_START;
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 0ca4241b4f63..99998862c22e 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -917,7 +917,7 @@ static void set_multicast_list( struct net_device *dev )
917 REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */ 917 REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
918 } else { 918 } else {
919 short multicast_table[4]; 919 short multicast_table[4];
920 int num_addrs = dev->mc_count; 920 int num_addrs = netdev_mc_count(dev);
921 int i; 921 int i;
922 /* We don't use the multicast table, but rely on upper-layer 922 /* We don't use the multicast table, but rely on upper-layer
923 * filtering. */ 923 * filtering. */
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 25e81ebd9cd8..a0bd361d5eca 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -999,7 +999,7 @@ static void bigmac_set_multicast(struct net_device *dev)
999{ 999{
1000 struct bigmac *bp = netdev_priv(dev); 1000 struct bigmac *bp = netdev_priv(dev);
1001 void __iomem *bregs = bp->bregs; 1001 void __iomem *bregs = bp->bregs;
1002 struct dev_mc_list *dmi = dev->mc_list; 1002 struct dev_mc_list *dmi;
1003 char *addrs; 1003 char *addrs;
1004 int i; 1004 int i;
1005 u32 tmp, crc; 1005 u32 tmp, crc;
@@ -1013,7 +1013,7 @@ static void bigmac_set_multicast(struct net_device *dev)
1013 while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0) 1013 while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0)
1014 udelay(20); 1014 udelay(20);
1015 1015
1016 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) { 1016 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
1017 sbus_writel(0xffff, bregs + BMAC_HTABLE0); 1017 sbus_writel(0xffff, bregs + BMAC_HTABLE0);
1018 sbus_writel(0xffff, bregs + BMAC_HTABLE1); 1018 sbus_writel(0xffff, bregs + BMAC_HTABLE1);
1019 sbus_writel(0xffff, bregs + BMAC_HTABLE2); 1019 sbus_writel(0xffff, bregs + BMAC_HTABLE2);
@@ -1028,9 +1028,8 @@ static void bigmac_set_multicast(struct net_device *dev)
1028 for (i = 0; i < 4; i++) 1028 for (i = 0; i < 4; i++)
1029 hash_table[i] = 0; 1029 hash_table[i] = 0;
1030 1030
1031 for (i = 0; i < dev->mc_count; i++) { 1031 netdev_for_each_mc_addr(dmi, dev) {
1032 addrs = dmi->dmi_addr; 1032 addrs = dmi->dmi_addr;
1033 dmi = dmi->next;
1034 1033
1035 if (!(*addrs & 1)) 1034 if (!(*addrs & 1))
1036 continue; 1035 continue;
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index d58e1891ca60..a855934dfc3b 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -206,7 +206,7 @@ IVc. Errata
206#define USE_IO_OPS 1 206#define USE_IO_OPS 1
207#endif 207#endif
208 208
209static const struct pci_device_id sundance_pci_tbl[] = { 209static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
210 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 }, 210 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
211 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 }, 211 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
212 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 }, 212 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
@@ -1517,19 +1517,18 @@ static void set_rx_mode(struct net_device *dev)
1517 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1517 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1518 memset(mc_filter, 0xff, sizeof(mc_filter)); 1518 memset(mc_filter, 0xff, sizeof(mc_filter));
1519 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys; 1519 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1520 } else if ((dev->mc_count > multicast_filter_limit) || 1520 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1521 (dev->flags & IFF_ALLMULTI)) { 1521 (dev->flags & IFF_ALLMULTI)) {
1522 /* Too many to match, or accept all multicasts. */ 1522 /* Too many to match, or accept all multicasts. */
1523 memset(mc_filter, 0xff, sizeof(mc_filter)); 1523 memset(mc_filter, 0xff, sizeof(mc_filter));
1524 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 1524 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1525 } else if (dev->mc_count) { 1525 } else if (!netdev_mc_empty(dev)) {
1526 struct dev_mc_list *mclist; 1526 struct dev_mc_list *mclist;
1527 int bit; 1527 int bit;
1528 int index; 1528 int index;
1529 int crc; 1529 int crc;
1530 memset (mc_filter, 0, sizeof (mc_filter)); 1530 memset (mc_filter, 0, sizeof (mc_filter));
1531 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 1531 netdev_for_each_mc_addr(mclist, dev) {
1532 i++, mclist = mclist->next) {
1533 crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr); 1532 crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
1534 for (index=0, bit=0; bit < 6; bit++, crc <<= 1) 1533 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1535 if (crc & 0x80000000) index |= 1 << bit; 1534 if (crc & 0x80000000) index |= 1 << bit;
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index b571a1babab9..4344017bfaef 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -107,7 +107,7 @@ MODULE_LICENSE("GPL");
107#define GEM_MODULE_NAME "gem" 107#define GEM_MODULE_NAME "gem"
108#define PFX GEM_MODULE_NAME ": " 108#define PFX GEM_MODULE_NAME ": "
109 109
110static struct pci_device_id gem_pci_tbl[] = { 110static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = {
111 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM, 111 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
112 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 112 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
113 113
@@ -1837,7 +1837,7 @@ static u32 gem_setup_multicast(struct gem *gp)
1837 int i; 1837 int i;
1838 1838
1839 if ((gp->dev->flags & IFF_ALLMULTI) || 1839 if ((gp->dev->flags & IFF_ALLMULTI) ||
1840 (gp->dev->mc_count > 256)) { 1840 (netdev_mc_count(gp->dev) > 256)) {
1841 for (i=0; i<16; i++) 1841 for (i=0; i<16; i++)
1842 writel(0xffff, gp->regs + MAC_HASH0 + (i << 2)); 1842 writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
1843 rxcfg |= MAC_RXCFG_HFE; 1843 rxcfg |= MAC_RXCFG_HFE;
@@ -1846,17 +1846,13 @@ static u32 gem_setup_multicast(struct gem *gp)
1846 } else { 1846 } else {
1847 u16 hash_table[16]; 1847 u16 hash_table[16];
1848 u32 crc; 1848 u32 crc;
1849 struct dev_mc_list *dmi = gp->dev->mc_list; 1849 struct dev_mc_list *dmi;
1850 int i; 1850 int i;
1851 1851
1852 for (i = 0; i < 16; i++) 1852 memset(hash_table, 0, sizeof(hash_table));
1853 hash_table[i] = 0; 1853 netdev_for_each_mc_addr(dmi, gp->dev) {
1854
1855 for (i = 0; i < gp->dev->mc_count; i++) {
1856 char *addrs = dmi->dmi_addr; 1854 char *addrs = dmi->dmi_addr;
1857 1855
1858 dmi = dmi->next;
1859
1860 if (!(*addrs & 1)) 1856 if (!(*addrs & 1))
1861 continue; 1857 continue;
1862 1858
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 6762f1c6ec8a..b17dbb11bd67 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -1516,24 +1516,20 @@ static int happy_meal_init(struct happy_meal *hp)
1516 1516
1517 HMD(("htable, ")); 1517 HMD(("htable, "));
1518 if ((hp->dev->flags & IFF_ALLMULTI) || 1518 if ((hp->dev->flags & IFF_ALLMULTI) ||
1519 (hp->dev->mc_count > 64)) { 1519 (netdev_mc_count(hp->dev) > 64)) {
1520 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff); 1520 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
1521 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff); 1521 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
1522 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff); 1522 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
1523 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff); 1523 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
1524 } else if ((hp->dev->flags & IFF_PROMISC) == 0) { 1524 } else if ((hp->dev->flags & IFF_PROMISC) == 0) {
1525 u16 hash_table[4]; 1525 u16 hash_table[4];
1526 struct dev_mc_list *dmi = hp->dev->mc_list; 1526 struct dev_mc_list *dmi;
1527 char *addrs; 1527 char *addrs;
1528 int i;
1529 u32 crc; 1528 u32 crc;
1530 1529
1531 for (i = 0; i < 4; i++) 1530 memset(hash_table, 0, sizeof(hash_table));
1532 hash_table[i] = 0; 1531 netdev_for_each_mc_addr(dmi, hp->dev) {
1533
1534 for (i = 0; i < hp->dev->mc_count; i++) {
1535 addrs = dmi->dmi_addr; 1532 addrs = dmi->dmi_addr;
1536 dmi = dmi->next;
1537 1533
1538 if (!(*addrs & 1)) 1534 if (!(*addrs & 1))
1539 continue; 1535 continue;
@@ -2366,14 +2362,13 @@ static void happy_meal_set_multicast(struct net_device *dev)
2366{ 2362{
2367 struct happy_meal *hp = netdev_priv(dev); 2363 struct happy_meal *hp = netdev_priv(dev);
2368 void __iomem *bregs = hp->bigmacregs; 2364 void __iomem *bregs = hp->bigmacregs;
2369 struct dev_mc_list *dmi = dev->mc_list; 2365 struct dev_mc_list *dmi;
2370 char *addrs; 2366 char *addrs;
2371 int i;
2372 u32 crc; 2367 u32 crc;
2373 2368
2374 spin_lock_irq(&hp->happy_lock); 2369 spin_lock_irq(&hp->happy_lock);
2375 2370
2376 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) { 2371 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
2377 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff); 2372 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
2378 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff); 2373 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
2379 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff); 2374 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
@@ -2384,12 +2379,9 @@ static void happy_meal_set_multicast(struct net_device *dev)
2384 } else { 2379 } else {
2385 u16 hash_table[4]; 2380 u16 hash_table[4];
2386 2381
2387 for (i = 0; i < 4; i++) 2382 memset(hash_table, 0, sizeof(hash_table));
2388 hash_table[i] = 0; 2383 netdev_for_each_mc_addr(dmi, dev) {
2389
2390 for (i = 0; i < dev->mc_count; i++) {
2391 addrs = dmi->dmi_addr; 2384 addrs = dmi->dmi_addr;
2392 dmi = dmi->next;
2393 2385
2394 if (!(*addrs & 1)) 2386 if (!(*addrs & 1))
2395 continue; 2387 continue;
@@ -3211,7 +3203,7 @@ static void __devexit happy_meal_pci_remove(struct pci_dev *pdev)
3211 dev_set_drvdata(&pdev->dev, NULL); 3203 dev_set_drvdata(&pdev->dev, NULL);
3212} 3204}
3213 3205
3214static struct pci_device_id happymeal_pci_ids[] = { 3206static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
3215 { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) }, 3207 { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
3216 { } /* Terminating entry */ 3208 { } /* Terminating entry */
3217}; 3209};
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 64e7d08c878f..d7c73f478ef5 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1170,9 +1170,8 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
1170static void lance_load_multicast(struct net_device *dev) 1170static void lance_load_multicast(struct net_device *dev)
1171{ 1171{
1172 struct lance_private *lp = netdev_priv(dev); 1172 struct lance_private *lp = netdev_priv(dev);
1173 struct dev_mc_list *dmi = dev->mc_list; 1173 struct dev_mc_list *dmi;
1174 char *addrs; 1174 char *addrs;
1175 int i;
1176 u32 crc; 1175 u32 crc;
1177 u32 val; 1176 u32 val;
1178 1177
@@ -1196,9 +1195,8 @@ static void lance_load_multicast(struct net_device *dev)
1196 return; 1195 return;
1197 1196
1198 /* Add addresses */ 1197 /* Add addresses */
1199 for (i = 0; i < dev->mc_count; i++) { 1198 netdev_for_each_mc_addr(dmi, dev) {
1200 addrs = dmi->dmi_addr; 1199 addrs = dmi->dmi_addr;
1201 dmi = dmi->next;
1202 1200
1203 /* multicast address? */ 1201 /* multicast address? */
1204 if (!(*addrs & 1)) 1202 if (!(*addrs & 1))
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index 45c383f285ee..be637dce944c 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -627,7 +627,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
627static void qe_set_multicast(struct net_device *dev) 627static void qe_set_multicast(struct net_device *dev)
628{ 628{
629 struct sunqe *qep = netdev_priv(dev); 629 struct sunqe *qep = netdev_priv(dev);
630 struct dev_mc_list *dmi = dev->mc_list; 630 struct dev_mc_list *dmi;
631 u8 new_mconfig = qep->mconfig; 631 u8 new_mconfig = qep->mconfig;
632 char *addrs; 632 char *addrs;
633 int i; 633 int i;
@@ -636,7 +636,7 @@ static void qe_set_multicast(struct net_device *dev)
636 /* Lock out others. */ 636 /* Lock out others. */
637 netif_stop_queue(dev); 637 netif_stop_queue(dev);
638 638
639 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) { 639 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
640 sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, 640 sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
641 qep->mregs + MREGS_IACONFIG); 641 qep->mregs + MREGS_IACONFIG);
642 while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) 642 while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
@@ -650,12 +650,9 @@ static void qe_set_multicast(struct net_device *dev)
650 u16 hash_table[4]; 650 u16 hash_table[4];
651 u8 *hbytes = (unsigned char *) &hash_table[0]; 651 u8 *hbytes = (unsigned char *) &hash_table[0];
652 652
653 for (i = 0; i < 4; i++) 653 memset(hash_table, 0, sizeof(hash_table));
654 hash_table[i] = 0; 654 netdev_for_each_mc_addr(dmi, dev) {
655
656 for (i = 0; i < dev->mc_count; i++) {
657 addrs = dmi->dmi_addr; 655 addrs = dmi->dmi_addr;
658 dmi = dmi->next;
659 656
660 if (!(*addrs & 1)) 657 if (!(*addrs & 1))
661 continue; 658 continue;
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index bc74db0d12f3..6b1b7cea7f6b 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -765,7 +765,7 @@ static void __update_mc_list(struct vnet *vp, struct net_device *dev)
765{ 765{
766 struct dev_addr_list *p; 766 struct dev_addr_list *p;
767 767
768 for (p = dev->mc_list; p; p = p->next) { 768 netdev_for_each_mc_addr(p, dev) {
769 struct vnet_mcast_entry *m; 769 struct vnet_mcast_entry *m;
770 770
771 m = __vnet_mc_find(vp, p->dmi_addr); 771 m = __vnet_mc_find(vp, p->dmi_addr);
@@ -1062,10 +1062,7 @@ static struct vnet * __devinit vnet_new(const u64 *local_mac)
1062 goto err_out_free_dev; 1062 goto err_out_free_dev;
1063 } 1063 }
1064 1064
1065 printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name); 1065 printk(KERN_INFO "%s: Sun LDOM vnet %pM\n", dev->name, dev->dev_addr);
1066
1067 for (i = 0; i < 6; i++)
1068 printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
1069 1066
1070 list_add(&vp->list, &vnet_list); 1067 list_add(&vp->list, &vnet_list);
1071 1068
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index d71c1976072e..49bd84c0d583 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -65,7 +65,7 @@ static const struct {
65 { "TOSHIBA TC35815/TX4939" }, 65 { "TOSHIBA TC35815/TX4939" },
66}; 66};
67 67
68static const struct pci_device_id tc35815_pci_tbl[] = { 68static DEFINE_PCI_DEVICE_TABLE(tc35815_pci_tbl) = {
69 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF }, 69 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF },
70 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU }, 70 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU },
71 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 }, 71 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 },
@@ -402,6 +402,7 @@ struct tc35815_local {
402 * by this lock as well. 402 * by this lock as well.
403 */ 403 */
404 spinlock_t lock; 404 spinlock_t lock;
405 spinlock_t rx_lock;
405 406
406 struct mii_bus *mii_bus; 407 struct mii_bus *mii_bus;
407 struct phy_device *phy_dev; 408 struct phy_device *phy_dev;
@@ -835,6 +836,7 @@ static int __devinit tc35815_init_one(struct pci_dev *pdev,
835 836
836 INIT_WORK(&lp->restart_work, tc35815_restart_work); 837 INIT_WORK(&lp->restart_work, tc35815_restart_work);
837 spin_lock_init(&lp->lock); 838 spin_lock_init(&lp->lock);
839 spin_lock_init(&lp->rx_lock);
838 lp->pci_dev = pdev; 840 lp->pci_dev = pdev;
839 lp->chiptype = ent->driver_data; 841 lp->chiptype = ent->driver_data;
840 842
@@ -1186,6 +1188,7 @@ static void tc35815_restart(struct net_device *dev)
1186 printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name); 1188 printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name);
1187 } 1189 }
1188 1190
1191 spin_lock_bh(&lp->rx_lock);
1189 spin_lock_irq(&lp->lock); 1192 spin_lock_irq(&lp->lock);
1190 tc35815_chip_reset(dev); 1193 tc35815_chip_reset(dev);
1191 tc35815_clear_queues(dev); 1194 tc35815_clear_queues(dev);
@@ -1193,6 +1196,7 @@ static void tc35815_restart(struct net_device *dev)
1193 /* Reconfigure CAM again since tc35815_chip_init() initialize it. */ 1196 /* Reconfigure CAM again since tc35815_chip_init() initialize it. */
1194 tc35815_set_multicast_list(dev); 1197 tc35815_set_multicast_list(dev);
1195 spin_unlock_irq(&lp->lock); 1198 spin_unlock_irq(&lp->lock);
1199 spin_unlock_bh(&lp->rx_lock);
1196 1200
1197 netif_wake_queue(dev); 1201 netif_wake_queue(dev);
1198} 1202}
@@ -1211,11 +1215,14 @@ static void tc35815_schedule_restart(struct net_device *dev)
1211 struct tc35815_local *lp = netdev_priv(dev); 1215 struct tc35815_local *lp = netdev_priv(dev);
1212 struct tc35815_regs __iomem *tr = 1216 struct tc35815_regs __iomem *tr =
1213 (struct tc35815_regs __iomem *)dev->base_addr; 1217 (struct tc35815_regs __iomem *)dev->base_addr;
1218 unsigned long flags;
1214 1219
1215 /* disable interrupts */ 1220 /* disable interrupts */
1221 spin_lock_irqsave(&lp->lock, flags);
1216 tc_writel(0, &tr->Int_En); 1222 tc_writel(0, &tr->Int_En);
1217 tc_writel(tc_readl(&tr->DMA_Ctl) | DMA_IntMask, &tr->DMA_Ctl); 1223 tc_writel(tc_readl(&tr->DMA_Ctl) | DMA_IntMask, &tr->DMA_Ctl);
1218 schedule_work(&lp->restart_work); 1224 schedule_work(&lp->restart_work);
1225 spin_unlock_irqrestore(&lp->lock, flags);
1219} 1226}
1220 1227
1221static void tc35815_tx_timeout(struct net_device *dev) 1228static void tc35815_tx_timeout(struct net_device *dev)
@@ -1436,7 +1443,9 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
1436 if (status & Int_IntMacTx) { 1443 if (status & Int_IntMacTx) {
1437 /* Transmit complete. */ 1444 /* Transmit complete. */
1438 lp->lstats.tx_ints++; 1445 lp->lstats.tx_ints++;
1446 spin_lock_irq(&lp->lock);
1439 tc35815_txdone(dev); 1447 tc35815_txdone(dev);
1448 spin_unlock_irq(&lp->lock);
1440 if (ret < 0) 1449 if (ret < 0)
1441 ret = 0; 1450 ret = 0;
1442 } 1451 }
@@ -1649,7 +1658,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
1649 int received = 0, handled; 1658 int received = 0, handled;
1650 u32 status; 1659 u32 status;
1651 1660
1652 spin_lock(&lp->lock); 1661 spin_lock(&lp->rx_lock);
1653 status = tc_readl(&tr->Int_Src); 1662 status = tc_readl(&tr->Int_Src);
1654 do { 1663 do {
1655 /* BLEx, FDAEx will be cleared later */ 1664 /* BLEx, FDAEx will be cleared later */
@@ -1667,7 +1676,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
1667 } 1676 }
1668 status = tc_readl(&tr->Int_Src); 1677 status = tc_readl(&tr->Int_Src);
1669 } while (status); 1678 } while (status);
1670 spin_unlock(&lp->lock); 1679 spin_unlock(&lp->rx_lock);
1671 1680
1672 if (received < budget) { 1681 if (received < budget) {
1673 napi_complete(napi); 1682 napi_complete(napi);
@@ -1940,23 +1949,23 @@ tc35815_set_multicast_list(struct net_device *dev)
1940 /* Enable promiscuous mode */ 1949 /* Enable promiscuous mode */
1941 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl); 1950 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
1942 } else if ((dev->flags & IFF_ALLMULTI) || 1951 } else if ((dev->flags & IFF_ALLMULTI) ||
1943 dev->mc_count > CAM_ENTRY_MAX - 3) { 1952 netdev_mc_count(dev) > CAM_ENTRY_MAX - 3) {
1944 /* CAM 0, 1, 20 are reserved. */ 1953 /* CAM 0, 1, 20 are reserved. */
1945 /* Disable promiscuous mode, use normal mode. */ 1954 /* Disable promiscuous mode, use normal mode. */
1946 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl); 1955 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
1947 } else if (dev->mc_count) { 1956 } else if (!netdev_mc_empty(dev)) {
1948 struct dev_mc_list *cur_addr = dev->mc_list; 1957 struct dev_mc_list *cur_addr;
1949 int i; 1958 int i;
1950 int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE); 1959 int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);
1951 1960
1952 tc_writel(0, &tr->CAM_Ctl); 1961 tc_writel(0, &tr->CAM_Ctl);
1953 /* Walk the address list, and load the filter */ 1962 /* Walk the address list, and load the filter */
1954 for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) { 1963 i = 0;
1955 if (!cur_addr) 1964 netdev_for_each_mc_addr(cur_addr, dev) {
1956 break;
1957 /* entry 0,1 is reserved. */ 1965 /* entry 0,1 is reserved. */
1958 tc35815_set_cam_entry(dev, i + 2, cur_addr->dmi_addr); 1966 tc35815_set_cam_entry(dev, i + 2, cur_addr->dmi_addr);
1959 ena_bits |= CAM_Ena_Bit(i + 2); 1967 ena_bits |= CAM_Ena_Bit(i + 2);
1968 i++;
1960 } 1969 }
1961 tc_writel(ena_bits, &tr->CAM_Ena); 1970 tc_writel(ena_bits, &tr->CAM_Ena);
1962 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); 1971 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 80b404f2b938..0c9780217c87 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -62,9 +62,11 @@
62 * 62 *
63 */ 63 */
64 64
65#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
66
65#include "tehuti.h" 67#include "tehuti.h"
66 68
67static struct pci_device_id __devinitdata bdx_pci_tbl[] = { 69static DEFINE_PCI_DEVICE_TABLE(bdx_pci_tbl) = {
68 {0x1FC9, 0x3009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 70 {0x1FC9, 0x3009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
69 {0x1FC9, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 71 {0x1FC9, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
70 {0x1FC9, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 72 {0x1FC9, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
@@ -105,26 +107,24 @@ static void print_hw_id(struct pci_dev *pdev)
105 pci_read_config_word(pdev, PCI_LINK_STATUS_REG, &pci_link_status); 107 pci_read_config_word(pdev, PCI_LINK_STATUS_REG, &pci_link_status);
106 pci_read_config_word(pdev, PCI_DEV_CTRL_REG, &pci_ctrl); 108 pci_read_config_word(pdev, PCI_DEV_CTRL_REG, &pci_ctrl);
107 109
108 printk(KERN_INFO "tehuti: %s%s\n", BDX_NIC_NAME, 110 pr_info("%s%s\n", BDX_NIC_NAME,
109 nic->port_num == 1 ? "" : ", 2-Port"); 111 nic->port_num == 1 ? "" : ", 2-Port");
110 printk(KERN_INFO 112 pr_info("srom 0x%x fpga %d build %u lane# %d max_pl 0x%x mrrs 0x%x\n",
111 "tehuti: srom 0x%x fpga %d build %u lane# %d" 113 readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF,
112 " max_pl 0x%x mrrs 0x%x\n", 114 readl(nic->regs + FPGA_SEED),
113 readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF, 115 GET_LINK_STATUS_LANES(pci_link_status),
114 readl(nic->regs + FPGA_SEED), 116 GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl));
115 GET_LINK_STATUS_LANES(pci_link_status),
116 GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl));
117} 117}
118 118
119static void print_fw_id(struct pci_nic *nic) 119static void print_fw_id(struct pci_nic *nic)
120{ 120{
121 printk(KERN_INFO "tehuti: fw 0x%x\n", readl(nic->regs + FW_VER)); 121 pr_info("fw 0x%x\n", readl(nic->regs + FW_VER));
122} 122}
123 123
124static void print_eth_id(struct net_device *ndev) 124static void print_eth_id(struct net_device *ndev)
125{ 125{
126 printk(KERN_INFO "%s: %s, Port %c\n", ndev->name, BDX_NIC_NAME, 126 netdev_info(ndev, "%s, Port %c\n",
127 (ndev->if_port == 0) ? 'A' : 'B'); 127 BDX_NIC_NAME, (ndev->if_port == 0) ? 'A' : 'B');
128 128
129} 129}
130 130
@@ -160,7 +160,7 @@ bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
160 f->va = pci_alloc_consistent(priv->pdev, 160 f->va = pci_alloc_consistent(priv->pdev,
161 memsz + FIFO_EXTRA_SPACE, &f->da); 161 memsz + FIFO_EXTRA_SPACE, &f->da);
162 if (!f->va) { 162 if (!f->va) {
163 ERR("pci_alloc_consistent failed\n"); 163 pr_err("pci_alloc_consistent failed\n");
164 RET(-ENOMEM); 164 RET(-ENOMEM);
165 } 165 }
166 f->reg_CFG0 = reg_CFG0; 166 f->reg_CFG0 = reg_CFG0;
@@ -204,13 +204,13 @@ static void bdx_link_changed(struct bdx_priv *priv)
204 if (netif_carrier_ok(priv->ndev)) { 204 if (netif_carrier_ok(priv->ndev)) {
205 netif_stop_queue(priv->ndev); 205 netif_stop_queue(priv->ndev);
206 netif_carrier_off(priv->ndev); 206 netif_carrier_off(priv->ndev);
207 ERR("%s: Link Down\n", priv->ndev->name); 207 netdev_err(priv->ndev, "Link Down\n");
208 } 208 }
209 } else { 209 } else {
210 if (!netif_carrier_ok(priv->ndev)) { 210 if (!netif_carrier_ok(priv->ndev)) {
211 netif_wake_queue(priv->ndev); 211 netif_wake_queue(priv->ndev);
212 netif_carrier_on(priv->ndev); 212 netif_carrier_on(priv->ndev);
213 ERR("%s: Link Up\n", priv->ndev->name); 213 netdev_err(priv->ndev, "Link Up\n");
214 } 214 }
215 } 215 }
216} 216}
@@ -226,10 +226,10 @@ static void bdx_isr_extra(struct bdx_priv *priv, u32 isr)
226 bdx_link_changed(priv); 226 bdx_link_changed(priv);
227 227
228 if (isr & IR_PCIE_LINK) 228 if (isr & IR_PCIE_LINK)
229 ERR("%s: PCI-E Link Fault\n", priv->ndev->name); 229 netdev_err(priv->ndev, "PCI-E Link Fault\n");
230 230
231 if (isr & IR_PCIE_TOUT) 231 if (isr & IR_PCIE_TOUT)
232 ERR("%s: PCI-E Time Out\n", priv->ndev->name); 232 netdev_err(priv->ndev, "PCI-E Time Out\n");
233 233
234} 234}
235 235
@@ -345,7 +345,7 @@ out:
345 release_firmware(fw); 345 release_firmware(fw);
346 346
347 if (rc) { 347 if (rc) {
348 ERR("%s: firmware loading failed\n", priv->ndev->name); 348 netdev_err(priv->ndev, "firmware loading failed\n");
349 if (rc == -EIO) 349 if (rc == -EIO)
350 DBG("VPC = 0x%x VIC = 0x%x INIT_STATUS = 0x%x i=%d\n", 350 DBG("VPC = 0x%x VIC = 0x%x INIT_STATUS = 0x%x i=%d\n",
351 READ_REG(priv, regVPC), 351 READ_REG(priv, regVPC),
@@ -419,9 +419,11 @@ static int bdx_hw_start(struct bdx_priv *priv)
419 WRITE_REG(priv, regGMAC_RXF_A, GMAC_RX_FILTER_OSEN | 419 WRITE_REG(priv, regGMAC_RXF_A, GMAC_RX_FILTER_OSEN |
420 GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB); 420 GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB);
421 421
422#define BDX_IRQ_TYPE ((priv->nic->irq_type == IRQ_MSI)?0:IRQF_SHARED) 422#define BDX_IRQ_TYPE ((priv->nic->irq_type == IRQ_MSI) ? 0 : IRQF_SHARED)
423 if ((rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE, 423
424 ndev->name, ndev))) 424 rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE,
425 ndev->name, ndev);
426 if (rc)
425 goto err_irq; 427 goto err_irq;
426 bdx_enable_interrupts(priv); 428 bdx_enable_interrupts(priv);
427 429
@@ -462,7 +464,7 @@ static int bdx_hw_reset_direct(void __iomem *regs)
462 readl(regs + regRXD_CFG0_0); 464 readl(regs + regRXD_CFG0_0);
463 return 0; 465 return 0;
464 } 466 }
465 ERR("tehuti: HW reset failed\n"); 467 pr_err("HW reset failed\n");
466 return 1; /* failure */ 468 return 1; /* failure */
467} 469}
468 470
@@ -486,7 +488,7 @@ static int bdx_hw_reset(struct bdx_priv *priv)
486 READ_REG(priv, regRXD_CFG0_0); 488 READ_REG(priv, regRXD_CFG0_0);
487 return 0; 489 return 0;
488 } 490 }
489 ERR("tehuti: HW reset failed\n"); 491 pr_err("HW reset failed\n");
490 return 1; /* failure */ 492 return 1; /* failure */
491} 493}
492 494
@@ -510,8 +512,7 @@ static int bdx_sw_reset(struct bdx_priv *priv)
510 mdelay(10); 512 mdelay(10);
511 } 513 }
512 if (i == 50) 514 if (i == 50)
513 ERR("%s: SW reset timeout. continuing anyway\n", 515 netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n");
514 priv->ndev->name);
515 516
516 /* 6. disable intrs */ 517 /* 6. disable intrs */
517 WRITE_REG(priv, regRDINTCM0, 0); 518 WRITE_REG(priv, regRDINTCM0, 0);
@@ -604,18 +605,15 @@ static int bdx_open(struct net_device *ndev)
604 if (netif_running(ndev)) 605 if (netif_running(ndev))
605 netif_stop_queue(priv->ndev); 606 netif_stop_queue(priv->ndev);
606 607
607 if ((rc = bdx_tx_init(priv))) 608 if ((rc = bdx_tx_init(priv)) ||
608 goto err; 609 (rc = bdx_rx_init(priv)) ||
609 610 (rc = bdx_fw_load(priv)))
610 if ((rc = bdx_rx_init(priv)))
611 goto err;
612
613 if ((rc = bdx_fw_load(priv)))
614 goto err; 611 goto err;
615 612
616 bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0); 613 bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
617 614
618 if ((rc = bdx_hw_start(priv))) 615 rc = bdx_hw_start(priv);
616 if (rc)
619 goto err; 617 goto err;
620 618
621 napi_enable(&priv->napi); 619 napi_enable(&priv->napi);
@@ -647,7 +645,7 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
647 if (cmd != SIOCDEVPRIVATE) { 645 if (cmd != SIOCDEVPRIVATE) {
648 error = copy_from_user(data, ifr->ifr_data, sizeof(data)); 646 error = copy_from_user(data, ifr->ifr_data, sizeof(data));
649 if (error) { 647 if (error) {
650 ERR("cant copy from user\n"); 648 pr_err("cant copy from user\n");
651 RET(error); 649 RET(error);
652 } 650 }
653 DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]); 651 DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
@@ -708,7 +706,7 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
708 ENTER; 706 ENTER;
709 DBG2("vid=%d value=%d\n", (int)vid, enable); 707 DBG2("vid=%d value=%d\n", (int)vid, enable);
710 if (unlikely(vid >= 4096)) { 708 if (unlikely(vid >= 4096)) {
711 ERR("tehuti: invalid VID: %u (> 4096)\n", vid); 709 pr_err("invalid VID: %u (> 4096)\n", vid);
712 RET(); 710 RET();
713 } 711 }
714 reg = regVLAN_0 + (vid / 32) * 4; 712 reg = regVLAN_0 + (vid / 32) * 4;
@@ -776,8 +774,8 @@ static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
776 774
777 /* enforce minimum frame size */ 775 /* enforce minimum frame size */
778 if (new_mtu < ETH_ZLEN) { 776 if (new_mtu < ETH_ZLEN) {
779 ERR("%s: %s mtu %d is less then minimal %d\n", 777 netdev_err(ndev, "mtu %d is less then minimal %d\n",
780 BDX_DRV_NAME, ndev->name, new_mtu, ETH_ZLEN); 778 new_mtu, ETH_ZLEN);
781 RET(-EINVAL); 779 RET(-EINVAL);
782 } 780 }
783 781
@@ -808,7 +806,7 @@ static void bdx_setmulti(struct net_device *ndev)
808 /* set IMF to accept all multicast frmaes */ 806 /* set IMF to accept all multicast frmaes */
809 for (i = 0; i < MAC_MCST_HASH_NUM; i++) 807 for (i = 0; i < MAC_MCST_HASH_NUM; i++)
810 WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0); 808 WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
811 } else if (ndev->mc_count) { 809 } else if (!netdev_mc_empty(ndev)) {
812 u8 hash; 810 u8 hash;
813 struct dev_mc_list *mclist; 811 struct dev_mc_list *mclist;
814 u32 reg, val; 812 u32 reg, val;
@@ -826,10 +824,8 @@ static void bdx_setmulti(struct net_device *ndev)
826 /* TBD: sort addreses and write them in ascending order 824 /* TBD: sort addreses and write them in ascending order
827 * into RX_MAC_MCST regs. we skip this phase now and accept ALL 825 * into RX_MAC_MCST regs. we skip this phase now and accept ALL
828 * multicast frames throu IMF */ 826 * multicast frames throu IMF */
829 mclist = ndev->mc_list;
830
831 /* accept the rest of addresses throu IMF */ 827 /* accept the rest of addresses throu IMF */
832 for (; mclist; mclist = mclist->next) { 828 netdev_for_each_mc_addr(mclist, ndev) {
833 hash = 0; 829 hash = 0;
834 for (i = 0; i < ETH_ALEN; i++) 830 for (i = 0; i < ETH_ALEN; i++)
835 hash ^= mclist->dmi_addr[i]; 831 hash ^= mclist->dmi_addr[i];
@@ -840,7 +836,7 @@ static void bdx_setmulti(struct net_device *ndev)
840 } 836 }
841 837
842 } else { 838 } else {
843 DBG("only own mac %d\n", ndev->mc_count); 839 DBG("only own mac %d\n", netdev_mc_count(ndev));
844 rxf_val |= GMAC_RX_FILTER_AB; 840 rxf_val |= GMAC_RX_FILTER_AB;
845 } 841 }
846 WRITE_REG(priv, regGMAC_RXF_A, rxf_val); 842 WRITE_REG(priv, regGMAC_RXF_A, rxf_val);
@@ -1028,17 +1024,16 @@ static int bdx_rx_init(struct bdx_priv *priv)
1028 regRXF_CFG0_0, regRXF_CFG1_0, 1024 regRXF_CFG0_0, regRXF_CFG1_0,
1029 regRXF_RPTR_0, regRXF_WPTR_0)) 1025 regRXF_RPTR_0, regRXF_WPTR_0))
1030 goto err_mem; 1026 goto err_mem;
1031 if (! 1027 priv->rxdb = bdx_rxdb_create(priv->rxf_fifo0.m.memsz /
1032 (priv->rxdb = 1028 sizeof(struct rxf_desc));
1033 bdx_rxdb_create(priv->rxf_fifo0.m.memsz / 1029 if (!priv->rxdb)
1034 sizeof(struct rxf_desc))))
1035 goto err_mem; 1030 goto err_mem;
1036 1031
1037 priv->rxf_fifo0.m.pktsz = priv->ndev->mtu + VLAN_ETH_HLEN; 1032 priv->rxf_fifo0.m.pktsz = priv->ndev->mtu + VLAN_ETH_HLEN;
1038 return 0; 1033 return 0;
1039 1034
1040err_mem: 1035err_mem:
1041 ERR("%s: %s: Rx init failed\n", BDX_DRV_NAME, priv->ndev->name); 1036 netdev_err(priv->ndev, "Rx init failed\n");
1042 return -ENOMEM; 1037 return -ENOMEM;
1043} 1038}
1044 1039
@@ -1115,8 +1110,9 @@ static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
1115 ENTER; 1110 ENTER;
1116 dno = bdx_rxdb_available(db) - 1; 1111 dno = bdx_rxdb_available(db) - 1;
1117 while (dno > 0) { 1112 while (dno > 0) {
1118 if (!(skb = dev_alloc_skb(f->m.pktsz + NET_IP_ALIGN))) { 1113 skb = dev_alloc_skb(f->m.pktsz + NET_IP_ALIGN);
1119 ERR("NO MEM: dev_alloc_skb failed\n"); 1114 if (!skb) {
1115 pr_err("NO MEM: dev_alloc_skb failed\n");
1120 break; 1116 break;
1121 } 1117 }
1122 skb->dev = priv->ndev; 1118 skb->dev = priv->ndev;
@@ -1337,9 +1333,7 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
1337static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len, 1333static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
1338 u16 rxd_vlan) 1334 u16 rxd_vlan)
1339{ 1335{
1340 DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d " 1336 DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d va_lo %d va_hi %d\n",
1341 "pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d "
1342 "va_lo %d va_hi %d\n",
1343 GET_RXD_BC(rxd_val1), GET_RXD_RXFQ(rxd_val1), GET_RXD_TO(rxd_val1), 1337 GET_RXD_BC(rxd_val1), GET_RXD_RXFQ(rxd_val1), GET_RXD_TO(rxd_val1),
1344 GET_RXD_TYPE(rxd_val1), GET_RXD_ERR(rxd_val1), 1338 GET_RXD_TYPE(rxd_val1), GET_RXD_ERR(rxd_val1),
1345 GET_RXD_RXP(rxd_val1), GET_RXD_PKT_ID(rxd_val1), 1339 GET_RXD_RXP(rxd_val1), GET_RXD_PKT_ID(rxd_val1),
@@ -1591,7 +1585,7 @@ static int bdx_tx_init(struct bdx_priv *priv)
1591 return 0; 1585 return 0;
1592 1586
1593err_mem: 1587err_mem:
1594 ERR("tehuti: %s: Tx init failed\n", priv->ndev->name); 1588 netdev_err(priv->ndev, "Tx init failed\n");
1595 return -ENOMEM; 1589 return -ENOMEM;
1596} 1590}
1597 1591
@@ -1609,7 +1603,7 @@ static inline int bdx_tx_space(struct bdx_priv *priv)
1609 fsize = f->m.rptr - f->m.wptr; 1603 fsize = f->m.rptr - f->m.wptr;
1610 if (fsize <= 0) 1604 if (fsize <= 0)
1611 fsize = f->m.memsz + fsize; 1605 fsize = f->m.memsz + fsize;
1612 return (fsize); 1606 return fsize;
1613} 1607}
1614 1608
1615/* bdx_tx_transmit - send packet to NIC 1609/* bdx_tx_transmit - send packet to NIC
@@ -1937,8 +1931,9 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1937 RET(-ENOMEM); 1931 RET(-ENOMEM);
1938 1932
1939 /************** pci *****************/ 1933 /************** pci *****************/
1940 if ((err = pci_enable_device(pdev))) /* it trigers interrupt, dunno why. */ 1934 err = pci_enable_device(pdev);
1941 goto err_pci; /* it's not a problem though */ 1935 if (err) /* it triggers interrupt, dunno why. */
1936 goto err_pci; /* it's not a problem though */
1942 1937
1943 if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) && 1938 if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
1944 !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) { 1939 !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
@@ -1946,14 +1941,14 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1946 } else { 1941 } else {
1947 if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) || 1942 if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
1948 (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) { 1943 (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
1949 printk(KERN_ERR "tehuti: No usable DMA configuration" 1944 pr_err("No usable DMA configuration, aborting\n");
1950 ", aborting\n");
1951 goto err_dma; 1945 goto err_dma;
1952 } 1946 }
1953 pci_using_dac = 0; 1947 pci_using_dac = 0;
1954 } 1948 }
1955 1949
1956 if ((err = pci_request_regions(pdev, BDX_DRV_NAME))) 1950 err = pci_request_regions(pdev, BDX_DRV_NAME);
1951 if (err)
1957 goto err_dma; 1952 goto err_dma;
1958 1953
1959 pci_set_master(pdev); 1954 pci_set_master(pdev);
@@ -1961,25 +1956,26 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1961 pciaddr = pci_resource_start(pdev, 0); 1956 pciaddr = pci_resource_start(pdev, 0);
1962 if (!pciaddr) { 1957 if (!pciaddr) {
1963 err = -EIO; 1958 err = -EIO;
1964 ERR("tehuti: no MMIO resource\n"); 1959 pr_err("no MMIO resource\n");
1965 goto err_out_res; 1960 goto err_out_res;
1966 } 1961 }
1967 if ((regionSize = pci_resource_len(pdev, 0)) < BDX_REGS_SIZE) { 1962 regionSize = pci_resource_len(pdev, 0);
1963 if (regionSize < BDX_REGS_SIZE) {
1968 err = -EIO; 1964 err = -EIO;
1969 ERR("tehuti: MMIO resource (%x) too small\n", regionSize); 1965 pr_err("MMIO resource (%x) too small\n", regionSize);
1970 goto err_out_res; 1966 goto err_out_res;
1971 } 1967 }
1972 1968
1973 nic->regs = ioremap(pciaddr, regionSize); 1969 nic->regs = ioremap(pciaddr, regionSize);
1974 if (!nic->regs) { 1970 if (!nic->regs) {
1975 err = -EIO; 1971 err = -EIO;
1976 ERR("tehuti: ioremap failed\n"); 1972 pr_err("ioremap failed\n");
1977 goto err_out_res; 1973 goto err_out_res;
1978 } 1974 }
1979 1975
1980 if (pdev->irq < 2) { 1976 if (pdev->irq < 2) {
1981 err = -EIO; 1977 err = -EIO;
1982 ERR("tehuti: invalid irq (%d)\n", pdev->irq); 1978 pr_err("invalid irq (%d)\n", pdev->irq);
1983 goto err_out_iomap; 1979 goto err_out_iomap;
1984 } 1980 }
1985 pci_set_drvdata(pdev, nic); 1981 pci_set_drvdata(pdev, nic);
@@ -1996,8 +1992,9 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1996 nic->irq_type = IRQ_INTX; 1992 nic->irq_type = IRQ_INTX;
1997#ifdef BDX_MSI 1993#ifdef BDX_MSI
1998 if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) { 1994 if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
1999 if ((err = pci_enable_msi(pdev))) 1995 err = pci_enable_msi(pdev);
2000 ERR("Tehuti: Can't eneble msi. error is %d\n", err); 1996 if (err)
1997 pr_err("Can't eneble msi. error is %d\n", err);
2001 else 1998 else
2002 nic->irq_type = IRQ_MSI; 1999 nic->irq_type = IRQ_MSI;
2003 } else 2000 } else
@@ -2006,9 +2003,10 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2006 2003
2007 /************** netdev **************/ 2004 /************** netdev **************/
2008 for (port = 0; port < nic->port_num; port++) { 2005 for (port = 0; port < nic->port_num; port++) {
2009 if (!(ndev = alloc_etherdev(sizeof(struct bdx_priv)))) { 2006 ndev = alloc_etherdev(sizeof(struct bdx_priv));
2007 if (!ndev) {
2010 err = -ENOMEM; 2008 err = -ENOMEM;
2011 printk(KERN_ERR "tehuti: alloc_etherdev failed\n"); 2009 pr_err("alloc_etherdev failed\n");
2012 goto err_out_iomap; 2010 goto err_out_iomap;
2013 } 2011 }
2014 2012
@@ -2075,12 +2073,13 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2075 2073
2076 /*bdx_hw_reset(priv); */ 2074 /*bdx_hw_reset(priv); */
2077 if (bdx_read_mac(priv)) { 2075 if (bdx_read_mac(priv)) {
2078 printk(KERN_ERR "tehuti: load MAC address failed\n"); 2076 pr_err("load MAC address failed\n");
2079 goto err_out_iomap; 2077 goto err_out_iomap;
2080 } 2078 }
2081 SET_NETDEV_DEV(ndev, &pdev->dev); 2079 SET_NETDEV_DEV(ndev, &pdev->dev);
2082 if ((err = register_netdev(ndev))) { 2080 err = register_netdev(ndev);
2083 printk(KERN_ERR "tehuti: register_netdev failed\n"); 2081 if (err) {
2082 pr_err("register_netdev failed\n");
2084 goto err_out_free; 2083 goto err_out_free;
2085 } 2084 }
2086 netif_carrier_off(ndev); 2085 netif_carrier_off(ndev);
@@ -2294,13 +2293,13 @@ bdx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
2294/* Convert RX fifo size to number of pending packets */ 2293/* Convert RX fifo size to number of pending packets */
2295static inline int bdx_rx_fifo_size_to_packets(int rx_size) 2294static inline int bdx_rx_fifo_size_to_packets(int rx_size)
2296{ 2295{
2297 return ((FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc)); 2296 return (FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc);
2298} 2297}
2299 2298
2300/* Convert TX fifo size to number of pending packets */ 2299/* Convert TX fifo size to number of pending packets */
2301static inline int bdx_tx_fifo_size_to_packets(int tx_size) 2300static inline int bdx_tx_fifo_size_to_packets(int tx_size)
2302{ 2301{
2303 return ((FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ); 2302 return (FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ;
2304} 2303}
2305 2304
2306/* 2305/*
@@ -2392,10 +2391,10 @@ static int bdx_get_sset_count(struct net_device *netdev, int stringset)
2392 case ETH_SS_STATS: 2391 case ETH_SS_STATS:
2393 BDX_ASSERT(ARRAY_SIZE(bdx_stat_names) 2392 BDX_ASSERT(ARRAY_SIZE(bdx_stat_names)
2394 != sizeof(struct bdx_stats) / sizeof(u64)); 2393 != sizeof(struct bdx_stats) / sizeof(u64));
2395 return ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0); 2394 return (priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0;
2396 default:
2397 return -EINVAL;
2398 } 2395 }
2396
2397 return -EINVAL;
2399} 2398}
2400 2399
2401/* 2400/*
@@ -2493,10 +2492,8 @@ static struct pci_driver bdx_pci_driver = {
2493 */ 2492 */
2494static void __init print_driver_id(void) 2493static void __init print_driver_id(void)
2495{ 2494{
2496 printk(KERN_INFO "%s: %s, %s\n", BDX_DRV_NAME, BDX_DRV_DESC, 2495 pr_info("%s, %s\n", BDX_DRV_DESC, BDX_DRV_VERSION);
2497 BDX_DRV_VERSION); 2496 pr_info("Options: hw_csum %s\n", BDX_MSI_STRING);
2498 printk(KERN_INFO "%s: Options: hw_csum %s\n", BDX_DRV_NAME,
2499 BDX_MSI_STRING);
2500} 2497}
2501 2498
2502static int __init bdx_module_init(void) 2499static int __init bdx_module_init(void)
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index 124141909e42..a19dcf8b6b56 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -529,28 +529,34 @@ struct txd_desc {
529 529
530/* Debugging Macros */ 530/* Debugging Macros */
531 531
532#define ERR(fmt, args...) printk(KERN_ERR fmt, ## args) 532#define DBG2(fmt, args...) \
533#define DBG2(fmt, args...) \ 533 pr_err("%s:%-5d: " fmt, __func__, __LINE__, ## args)
534 printk(KERN_ERR "%s:%-5d: " fmt, __func__, __LINE__, ## args)
535 534
536#define BDX_ASSERT(x) BUG_ON(x) 535#define BDX_ASSERT(x) BUG_ON(x)
537 536
538#ifdef DEBUG 537#ifdef DEBUG
539 538
540#define ENTER do { \ 539#define ENTER \
541 printk(KERN_ERR "%s:%-5d: ENTER\n", __func__, __LINE__); \ 540do { \
541 pr_err("%s:%-5d: ENTER\n", __func__, __LINE__); \
542} while (0) 542} while (0)
543 543
544#define RET(args...) do { \ 544#define RET(args...) \
545 printk(KERN_ERR "%s:%-5d: RETURN\n", __func__, __LINE__); \ 545do { \
546return args; } while (0) 546 pr_err("%s:%-5d: RETURN\n", __func__, __LINE__); \
547 return args; \
548} while (0)
547 549
548#define DBG(fmt, args...) \ 550#define DBG(fmt, args...) \
549 printk(KERN_ERR "%s:%-5d: " fmt, __func__, __LINE__, ## args) 551 pr_err("%s:%-5d: " fmt, __func__, __LINE__, ## args)
550#else 552#else
551#define ENTER do { } while (0) 553#define ENTER do { } while (0)
552#define RET(args...) return args 554#define RET(args...) return args
553#define DBG(fmt, args...) do { } while (0) 555#define DBG(fmt, args...) \
556do { \
557 if (0) \
558 pr_err(fmt, ##args); \
559} while (0)
554#endif 560#endif
555 561
556#endif /* _BDX__H */ 562#endif /* _BDX__H */
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7f82b0238e08..0fa7688ab483 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -67,9 +67,8 @@
67#include "tg3.h" 67#include "tg3.h"
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define PFX DRV_MODULE_NAME ": " 70#define DRV_MODULE_VERSION "3.108"
71#define DRV_MODULE_VERSION "3.106" 71#define DRV_MODULE_RELDATE "February 17, 2010"
72#define DRV_MODULE_RELDATE "January 12, 2010"
73 72
74#define TG3_DEF_MAC_MODE 0 73#define TG3_DEF_MAC_MODE 0
75#define TG3_DEF_RX_MODE 0 74#define TG3_DEF_RX_MODE 0
@@ -158,7 +157,7 @@
158#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin" 157#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
159 158
160static char version[] __devinitdata = 159static char version[] __devinitdata =
161 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 160 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
162 161
163MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)"); 162MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
164MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver"); 163MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
@@ -174,7 +173,7 @@ static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
174module_param(tg3_debug, int, 0); 173module_param(tg3_debug, int, 0);
175MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); 174MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
176 175
177static struct pci_device_id tg3_pci_tbl[] = { 176static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)}, 177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)}, 178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)}, 179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
@@ -244,6 +243,12 @@ static struct pci_device_id tg3_pci_tbl[] = {
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)}, 243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)}, 244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)}, 245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
247 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, 252 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
248 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, 253 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
249 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, 254 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -636,7 +641,6 @@ static void tg3_disable_ints(struct tg3 *tp)
636static void tg3_enable_ints(struct tg3 *tp) 641static void tg3_enable_ints(struct tg3 *tp)
637{ 642{
638 int i; 643 int i;
639 u32 coal_now = 0;
640 644
641 tp->irq_sync = 0; 645 tp->irq_sync = 0;
642 wmb(); 646 wmb();
@@ -644,13 +648,14 @@ static void tg3_enable_ints(struct tg3 *tp)
644 tw32(TG3PCI_MISC_HOST_CTRL, 648 tw32(TG3PCI_MISC_HOST_CTRL,
645 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); 649 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
646 650
651 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
647 for (i = 0; i < tp->irq_cnt; i++) { 652 for (i = 0; i < tp->irq_cnt; i++) {
648 struct tg3_napi *tnapi = &tp->napi[i]; 653 struct tg3_napi *tnapi = &tp->napi[i];
649 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); 654 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
650 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) 655 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
651 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); 656 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
652 657
653 coal_now |= tnapi->coal_now; 658 tp->coal_now |= tnapi->coal_now;
654 } 659 }
655 660
656 /* Force an initial interrupt */ 661 /* Force an initial interrupt */
@@ -658,8 +663,9 @@ static void tg3_enable_ints(struct tg3 *tp)
658 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED)) 663 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
659 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); 664 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
660 else 665 else
661 tw32(HOSTCC_MODE, tp->coalesce_mode | 666 tw32(HOSTCC_MODE, tp->coal_now);
662 HOSTCC_MODE_ENABLE | coal_now); 667
668 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
663} 669}
664 670
665static inline unsigned int tg3_has_work(struct tg3_napi *tnapi) 671static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
@@ -948,17 +954,17 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
948 954
949 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 955 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
950 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { 956 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
951 case TG3_PHY_ID_BCM50610: 957 case PHY_ID_BCM50610:
952 case TG3_PHY_ID_BCM50610M: 958 case PHY_ID_BCM50610M:
953 val = MAC_PHYCFG2_50610_LED_MODES; 959 val = MAC_PHYCFG2_50610_LED_MODES;
954 break; 960 break;
955 case TG3_PHY_ID_BCMAC131: 961 case PHY_ID_BCMAC131:
956 val = MAC_PHYCFG2_AC131_LED_MODES; 962 val = MAC_PHYCFG2_AC131_LED_MODES;
957 break; 963 break;
958 case TG3_PHY_ID_RTL8211C: 964 case PHY_ID_RTL8211C:
959 val = MAC_PHYCFG2_RTL8211C_LED_MODES; 965 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
960 break; 966 break;
961 case TG3_PHY_ID_RTL8201E: 967 case PHY_ID_RTL8201E:
962 val = MAC_PHYCFG2_RTL8201E_LED_MODES; 968 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
963 break; 969 break;
964 default: 970 default:
@@ -977,7 +983,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
977 return; 983 return;
978 } 984 }
979 985
980 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) 986 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
981 val |= MAC_PHYCFG2_EMODE_MASK_MASK | 987 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
982 MAC_PHYCFG2_FMODE_MASK_MASK | 988 MAC_PHYCFG2_FMODE_MASK_MASK |
983 MAC_PHYCFG2_GMODE_MASK_MASK | 989 MAC_PHYCFG2_GMODE_MASK_MASK |
@@ -990,7 +996,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
990 val = tr32(MAC_PHYCFG1); 996 val = tr32(MAC_PHYCFG1);
991 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK | 997 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
992 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN); 998 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
993 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) { 999 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
994 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) 1000 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
995 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC; 1001 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
996 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) 1002 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
@@ -1008,7 +1014,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
1008 MAC_RGMII_MODE_TX_ENABLE | 1014 MAC_RGMII_MODE_TX_ENABLE |
1009 MAC_RGMII_MODE_TX_LOWPWR | 1015 MAC_RGMII_MODE_TX_LOWPWR |
1010 MAC_RGMII_MODE_TX_RESET); 1016 MAC_RGMII_MODE_TX_RESET);
1011 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) { 1017 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
1012 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) 1018 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1013 val |= MAC_RGMII_MODE_RX_INT_B | 1019 val |= MAC_RGMII_MODE_RX_INT_B |
1014 MAC_RGMII_MODE_RX_QUALITY | 1020 MAC_RGMII_MODE_RX_QUALITY |
@@ -1028,6 +1034,17 @@ static void tg3_mdio_start(struct tg3 *tp)
1028 tw32_f(MAC_MI_MODE, tp->mi_mode); 1034 tw32_f(MAC_MI_MODE, tp->mi_mode);
1029 udelay(80); 1035 udelay(80);
1030 1036
1037 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
1038 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1039 tg3_mdio_config_5785(tp);
1040}
1041
1042static int tg3_mdio_init(struct tg3 *tp)
1043{
1044 int i;
1045 u32 reg;
1046 struct phy_device *phydev;
1047
1031 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 1048 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
1032 u32 funcnum, is_serdes; 1049 u32 funcnum, is_serdes;
1033 1050
@@ -1047,17 +1064,6 @@ static void tg3_mdio_start(struct tg3 *tp)
1047 } else 1064 } else
1048 tp->phy_addr = TG3_PHY_MII_ADDR; 1065 tp->phy_addr = TG3_PHY_MII_ADDR;
1049 1066
1050 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
1051 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1052 tg3_mdio_config_5785(tp);
1053}
1054
1055static int tg3_mdio_init(struct tg3 *tp)
1056{
1057 int i;
1058 u32 reg;
1059 struct phy_device *phydev;
1060
1061 tg3_mdio_start(tp); 1067 tg3_mdio_start(tp);
1062 1068
1063 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) || 1069 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
@@ -1092,8 +1098,7 @@ static int tg3_mdio_init(struct tg3 *tp)
1092 1098
1093 i = mdiobus_register(tp->mdio_bus); 1099 i = mdiobus_register(tp->mdio_bus);
1094 if (i) { 1100 if (i) {
1095 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n", 1101 netdev_warn(tp->dev, "mdiobus_reg failed (0x%x)\n", i);
1096 tp->dev->name, i);
1097 mdiobus_free(tp->mdio_bus); 1102 mdiobus_free(tp->mdio_bus);
1098 return i; 1103 return i;
1099 } 1104 }
@@ -1101,35 +1106,35 @@ static int tg3_mdio_init(struct tg3 *tp)
1101 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 1106 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1102 1107
1103 if (!phydev || !phydev->drv) { 1108 if (!phydev || !phydev->drv) {
1104 printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name); 1109 netdev_warn(tp->dev, "No PHY devices\n");
1105 mdiobus_unregister(tp->mdio_bus); 1110 mdiobus_unregister(tp->mdio_bus);
1106 mdiobus_free(tp->mdio_bus); 1111 mdiobus_free(tp->mdio_bus);
1107 return -ENODEV; 1112 return -ENODEV;
1108 } 1113 }
1109 1114
1110 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { 1115 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1111 case TG3_PHY_ID_BCM57780: 1116 case PHY_ID_BCM57780:
1112 phydev->interface = PHY_INTERFACE_MODE_GMII; 1117 phydev->interface = PHY_INTERFACE_MODE_GMII;
1113 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; 1118 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1114 break; 1119 break;
1115 case TG3_PHY_ID_BCM50610: 1120 case PHY_ID_BCM50610:
1116 case TG3_PHY_ID_BCM50610M: 1121 case PHY_ID_BCM50610M:
1117 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE | 1122 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1118 PHY_BRCM_RX_REFCLK_UNUSED | 1123 PHY_BRCM_RX_REFCLK_UNUSED |
1119 PHY_BRCM_DIS_TXCRXC_NOENRGY | 1124 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1120 PHY_BRCM_AUTO_PWRDWN_ENABLE; 1125 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1121 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) 1126 if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
1122 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE; 1127 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1123 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) 1128 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1124 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE; 1129 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1125 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) 1130 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1126 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE; 1131 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1127 /* fallthru */ 1132 /* fallthru */
1128 case TG3_PHY_ID_RTL8211C: 1133 case PHY_ID_RTL8211C:
1129 phydev->interface = PHY_INTERFACE_MODE_RGMII; 1134 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1130 break; 1135 break;
1131 case TG3_PHY_ID_RTL8201E: 1136 case PHY_ID_RTL8201E:
1132 case TG3_PHY_ID_BCMAC131: 1137 case PHY_ID_BCMAC131:
1133 phydev->interface = PHY_INTERFACE_MODE_MII; 1138 phydev->interface = PHY_INTERFACE_MODE_MII;
1134 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; 1139 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1135 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; 1140 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
@@ -1245,27 +1250,22 @@ static void tg3_ump_link_report(struct tg3 *tp)
1245static void tg3_link_report(struct tg3 *tp) 1250static void tg3_link_report(struct tg3 *tp)
1246{ 1251{
1247 if (!netif_carrier_ok(tp->dev)) { 1252 if (!netif_carrier_ok(tp->dev)) {
1248 if (netif_msg_link(tp)) 1253 netif_info(tp, link, tp->dev, "Link is down\n");
1249 printk(KERN_INFO PFX "%s: Link is down.\n",
1250 tp->dev->name);
1251 tg3_ump_link_report(tp); 1254 tg3_ump_link_report(tp);
1252 } else if (netif_msg_link(tp)) { 1255 } else if (netif_msg_link(tp)) {
1253 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n", 1256 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1254 tp->dev->name, 1257 (tp->link_config.active_speed == SPEED_1000 ?
1255 (tp->link_config.active_speed == SPEED_1000 ? 1258 1000 :
1256 1000 : 1259 (tp->link_config.active_speed == SPEED_100 ?
1257 (tp->link_config.active_speed == SPEED_100 ? 1260 100 : 10)),
1258 100 : 10)), 1261 (tp->link_config.active_duplex == DUPLEX_FULL ?
1259 (tp->link_config.active_duplex == DUPLEX_FULL ? 1262 "full" : "half"));
1260 "full" : "half")); 1263
1261 1264 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1262 printk(KERN_INFO PFX 1265 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1263 "%s: Flow control is %s for TX and %s for RX.\n", 1266 "on" : "off",
1264 tp->dev->name, 1267 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1265 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ? 1268 "on" : "off");
1266 "on" : "off",
1267 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1268 "on" : "off");
1269 tg3_ump_link_report(tp); 1269 tg3_ump_link_report(tp);
1270 } 1270 }
1271} 1271}
@@ -1464,7 +1464,7 @@ static int tg3_phy_init(struct tg3 *tp)
1464 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link, 1464 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1465 phydev->dev_flags, phydev->interface); 1465 phydev->dev_flags, phydev->interface);
1466 if (IS_ERR(phydev)) { 1466 if (IS_ERR(phydev)) {
1467 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name); 1467 netdev_err(tp->dev, "Could not attach to PHY\n");
1468 return PTR_ERR(phydev); 1468 return PTR_ERR(phydev);
1469 } 1469 }
1470 1470
@@ -1564,7 +1564,9 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1564{ 1564{
1565 u32 reg; 1565 u32 reg;
1566 1566
1567 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 1567 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1568 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
1569 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1568 return; 1570 return;
1569 1571
1570 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { 1572 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
@@ -1939,6 +1941,10 @@ static int tg3_phy_reset(struct tg3 *tp)
1939 } 1941 }
1940 } 1942 }
1941 1943
1944 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
1945 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
1946 return 0;
1947
1942 tg3_phy_apply_otp(tp); 1948 tg3_phy_apply_otp(tp);
1943 1949
1944 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) 1950 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
@@ -1982,7 +1988,7 @@ out:
1982 } 1988 }
1983 /* Set Extended packet length bit (bit 14) on all chips that */ 1989 /* Set Extended packet length bit (bit 14) on all chips that */
1984 /* support jumbo frames */ 1990 /* support jumbo frames */
1985 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { 1991 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1986 /* Cannot do read-modify-write on 5401 */ 1992 /* Cannot do read-modify-write on 5401 */
1987 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); 1993 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1988 } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 1994 } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
@@ -2019,7 +2025,9 @@ static void tg3_frob_aux_power(struct tg3 *tp)
2019{ 2025{
2020 struct tg3 *tp_peer = tp; 2026 struct tg3 *tp_peer = tp;
2021 2027
2022 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0) 2028 /* The GPIOs do something completely different on 57765. */
2029 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
2030 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2023 return; 2031 return;
2024 2032
2025 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 2033 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
@@ -2132,7 +2140,7 @@ static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2132{ 2140{
2133 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) 2141 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2134 return 1; 2142 return 1;
2135 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) { 2143 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2136 if (speed != SPEED_10) 2144 if (speed != SPEED_10)
2137 return 1; 2145 return 1;
2138 } else if (speed == SPEED_10) 2146 } else if (speed == SPEED_10)
@@ -2485,8 +2493,8 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2485 break; 2493 break;
2486 2494
2487 default: 2495 default:
2488 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n", 2496 netdev_err(tp->dev, "Invalid power state (D%d) requested\n",
2489 tp->dev->name, state); 2497 state);
2490 return -EINVAL; 2498 return -EINVAL;
2491 } 2499 }
2492 2500
@@ -2548,11 +2556,11 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2548 phy_start_aneg(phydev); 2556 phy_start_aneg(phydev);
2549 2557
2550 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; 2558 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2551 if (phyid != TG3_PHY_ID_BCMAC131) { 2559 if (phyid != PHY_ID_BCMAC131) {
2552 phyid &= TG3_PHY_OUI_MASK; 2560 phyid &= PHY_BCM_OUI_MASK;
2553 if (phyid == TG3_PHY_OUI_1 || 2561 if (phyid == PHY_BCM_OUI_1 ||
2554 phyid == TG3_PHY_OUI_2 || 2562 phyid == PHY_BCM_OUI_2 ||
2555 phyid == TG3_PHY_OUI_3) 2563 phyid == PHY_BCM_OUI_3)
2556 do_low_power = true; 2564 do_low_power = true;
2557 } 2565 }
2558 } 2566 }
@@ -3062,7 +3070,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3062 if (force_reset) 3070 if (force_reset)
3063 tg3_phy_reset(tp); 3071 tg3_phy_reset(tp);
3064 3072
3065 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { 3073 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3066 tg3_readphy(tp, MII_BMSR, &bmsr); 3074 tg3_readphy(tp, MII_BMSR, &bmsr);
3067 if (tg3_readphy(tp, MII_BMSR, &bmsr) || 3075 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3068 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) 3076 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
@@ -3083,7 +3091,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3083 } 3091 }
3084 } 3092 }
3085 3093
3086 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 && 3094 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3095 TG3_PHY_REV_BCM5401_B0 &&
3087 !(bmsr & BMSR_LSTATUS) && 3096 !(bmsr & BMSR_LSTATUS) &&
3088 tp->link_config.active_speed == SPEED_1000) { 3097 tp->link_config.active_speed == SPEED_1000) {
3089 err = tg3_phy_reset(tp); 3098 err = tg3_phy_reset(tp);
@@ -3238,7 +3247,7 @@ relink:
3238 /* ??? Without this setting Netgear GA302T PHY does not 3247 /* ??? Without this setting Netgear GA302T PHY does not
3239 * ??? send/receive packets... 3248 * ??? send/receive packets...
3240 */ 3249 */
3241 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 && 3250 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3242 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) { 3251 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3243 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; 3252 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3244 tw32_f(MAC_MI_MODE, tp->mi_mode); 3253 tw32_f(MAC_MI_MODE, tp->mi_mode);
@@ -3953,7 +3962,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3953 tw32_f(MAC_MODE, tp->mac_mode); 3962 tw32_f(MAC_MODE, tp->mac_mode);
3954 udelay(40); 3963 udelay(40);
3955 3964
3956 if (tp->phy_id == PHY_ID_BCM8002) 3965 if (tp->phy_id == TG3_PHY_ID_BCM8002)
3957 tg3_init_bcm8002(tp); 3966 tg3_init_bcm8002(tp);
3958 3967
3959 /* Enable link change event even when serdes polling. */ 3968 /* Enable link change event even when serdes polling. */
@@ -4326,10 +4335,8 @@ static void tg3_tx_recover(struct tg3 *tp)
4326 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) || 4335 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4327 tp->write32_tx_mbox == tg3_write_indirect_mbox); 4336 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4328 4337
4329 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-" 4338 netdev_warn(tp->dev, "The system may be re-ordering memory-mapped I/O cycles to the network device, attempting to recover\n"
4330 "mapped I/O cycles to the network device, attempting to " 4339 "Please report the problem to the driver maintainer and include system chipset information.\n");
4331 "recover. Please report the problem to the driver maintainer "
4332 "and include system chipset information.\n", tp->dev->name);
4333 4340
4334 spin_lock(&tp->lock); 4341 spin_lock(&tp->lock);
4335 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING; 4342 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
@@ -4538,6 +4545,12 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
4538 pci_unmap_addr(src_map, mapping)); 4545 pci_unmap_addr(src_map, mapping));
4539 dest_desc->addr_hi = src_desc->addr_hi; 4546 dest_desc->addr_hi = src_desc->addr_hi;
4540 dest_desc->addr_lo = src_desc->addr_lo; 4547 dest_desc->addr_lo = src_desc->addr_lo;
4548
4549 /* Ensure that the update to the skb happens after the physical
4550 * addresses have been transferred to the new BD location.
4551 */
4552 smp_wmb();
4553
4541 src_map->skb = NULL; 4554 src_map->skb = NULL;
4542} 4555}
4543 4556
@@ -4638,11 +4651,16 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4638 if (skb_size < 0) 4651 if (skb_size < 0)
4639 goto drop_it; 4652 goto drop_it;
4640 4653
4641 ri->skb = NULL;
4642
4643 pci_unmap_single(tp->pdev, dma_addr, skb_size, 4654 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4644 PCI_DMA_FROMDEVICE); 4655 PCI_DMA_FROMDEVICE);
4645 4656
4657 /* Ensure that the update to the skb happens
4658 * after the usage of the old DMA mapping.
4659 */
4660 smp_wmb();
4661
4662 ri->skb = NULL;
4663
4646 skb_put(skb, len); 4664 skb_put(skb, len);
4647 } else { 4665 } else {
4648 struct sk_buff *copy_skb; 4666 struct sk_buff *copy_skb;
@@ -4719,7 +4737,7 @@ next_pkt_nopost:
4719 tw32_rx_mbox(tnapi->consmbox, sw_idx); 4737 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4720 4738
4721 /* Refill RX ring(s). */ 4739 /* Refill RX ring(s). */
4722 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) { 4740 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
4723 if (work_mask & RXD_OPAQUE_RING_STD) { 4741 if (work_mask & RXD_OPAQUE_RING_STD) {
4724 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; 4742 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4725 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 4743 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
@@ -4741,7 +4759,8 @@ next_pkt_nopost:
4741 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; 4759 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4742 tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE; 4760 tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
4743 4761
4744 napi_schedule(&tp->napi[1].napi); 4762 if (tnapi != &tp->napi[1])
4763 napi_schedule(&tp->napi[1].napi);
4745 } 4764 }
4746 4765
4747 return received; 4766 return received;
@@ -4773,12 +4792,12 @@ static void tg3_poll_link(struct tg3 *tp)
4773 } 4792 }
4774} 4793}
4775 4794
4776static void tg3_rx_prodring_xfer(struct tg3 *tp, 4795static int tg3_rx_prodring_xfer(struct tg3 *tp,
4777 struct tg3_rx_prodring_set *dpr, 4796 struct tg3_rx_prodring_set *dpr,
4778 struct tg3_rx_prodring_set *spr) 4797 struct tg3_rx_prodring_set *spr)
4779{ 4798{
4780 u32 si, di, cpycnt, src_prod_idx; 4799 u32 si, di, cpycnt, src_prod_idx;
4781 int i; 4800 int i, err = 0;
4782 4801
4783 while (1) { 4802 while (1) {
4784 src_prod_idx = spr->rx_std_prod_idx; 4803 src_prod_idx = spr->rx_std_prod_idx;
@@ -4801,6 +4820,23 @@ static void tg3_rx_prodring_xfer(struct tg3 *tp,
4801 si = spr->rx_std_cons_idx; 4820 si = spr->rx_std_cons_idx;
4802 di = dpr->rx_std_prod_idx; 4821 di = dpr->rx_std_prod_idx;
4803 4822
4823 for (i = di; i < di + cpycnt; i++) {
4824 if (dpr->rx_std_buffers[i].skb) {
4825 cpycnt = i - di;
4826 err = -ENOSPC;
4827 break;
4828 }
4829 }
4830
4831 if (!cpycnt)
4832 break;
4833
4834 /* Ensure that updates to the rx_std_buffers ring and the
4835 * shadowed hardware producer ring from tg3_recycle_skb() are
4836 * ordered correctly WRT the skb check above.
4837 */
4838 smp_rmb();
4839
4804 memcpy(&dpr->rx_std_buffers[di], 4840 memcpy(&dpr->rx_std_buffers[di],
4805 &spr->rx_std_buffers[si], 4841 &spr->rx_std_buffers[si],
4806 cpycnt * sizeof(struct ring_info)); 4842 cpycnt * sizeof(struct ring_info));
@@ -4841,6 +4877,23 @@ static void tg3_rx_prodring_xfer(struct tg3 *tp,
4841 si = spr->rx_jmb_cons_idx; 4877 si = spr->rx_jmb_cons_idx;
4842 di = dpr->rx_jmb_prod_idx; 4878 di = dpr->rx_jmb_prod_idx;
4843 4879
4880 for (i = di; i < di + cpycnt; i++) {
4881 if (dpr->rx_jmb_buffers[i].skb) {
4882 cpycnt = i - di;
4883 err = -ENOSPC;
4884 break;
4885 }
4886 }
4887
4888 if (!cpycnt)
4889 break;
4890
4891 /* Ensure that updates to the rx_jmb_buffers ring and the
4892 * shadowed hardware producer ring from tg3_recycle_skb() are
4893 * ordered correctly WRT the skb check above.
4894 */
4895 smp_rmb();
4896
4844 memcpy(&dpr->rx_jmb_buffers[di], 4897 memcpy(&dpr->rx_jmb_buffers[di],
4845 &spr->rx_jmb_buffers[si], 4898 &spr->rx_jmb_buffers[si],
4846 cpycnt * sizeof(struct ring_info)); 4899 cpycnt * sizeof(struct ring_info));
@@ -4858,6 +4911,8 @@ static void tg3_rx_prodring_xfer(struct tg3 *tp,
4858 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) % 4911 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
4859 TG3_RX_JUMBO_RING_SIZE; 4912 TG3_RX_JUMBO_RING_SIZE;
4860 } 4913 }
4914
4915 return err;
4861} 4916}
4862 4917
4863static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) 4918static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
@@ -4879,27 +4934,29 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4879 work_done += tg3_rx(tnapi, budget - work_done); 4934 work_done += tg3_rx(tnapi, budget - work_done);
4880 4935
4881 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) { 4936 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
4882 int i; 4937 struct tg3_rx_prodring_set *dpr = &tp->prodring[0];
4883 u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx; 4938 int i, err = 0;
4884 u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx; 4939 u32 std_prod_idx = dpr->rx_std_prod_idx;
4940 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
4885 4941
4886 for (i = 2; i < tp->irq_cnt; i++) 4942 for (i = 1; i < tp->irq_cnt; i++)
4887 tg3_rx_prodring_xfer(tp, tnapi->prodring, 4943 err |= tg3_rx_prodring_xfer(tp, dpr,
4888 tp->napi[i].prodring); 4944 tp->napi[i].prodring);
4889 4945
4890 wmb(); 4946 wmb();
4891 4947
4892 if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) { 4948 if (std_prod_idx != dpr->rx_std_prod_idx)
4893 u32 mbox = TG3_RX_STD_PROD_IDX_REG; 4949 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4894 tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx); 4950 dpr->rx_std_prod_idx);
4895 }
4896 4951
4897 if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) { 4952 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
4898 u32 mbox = TG3_RX_JMB_PROD_IDX_REG; 4953 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4899 tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx); 4954 dpr->rx_jmb_prod_idx);
4900 }
4901 4955
4902 mmiowb(); 4956 mmiowb();
4957
4958 if (err)
4959 tw32_f(HOSTCC_MODE, tp->coal_now);
4903 } 4960 }
4904 4961
4905 return work_done; 4962 return work_done;
@@ -5203,8 +5260,7 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5203 5260
5204 err = tg3_init_hw(tp, reset_phy); 5261 err = tg3_init_hw(tp, reset_phy);
5205 if (err) { 5262 if (err) {
5206 printk(KERN_ERR PFX "%s: Failed to re-initialize device, " 5263 netdev_err(tp->dev, "Failed to re-initialize device, aborting\n");
5207 "aborting.\n", tp->dev->name);
5208 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 5264 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5209 tg3_full_unlock(tp); 5265 tg3_full_unlock(tp);
5210 del_timer_sync(&tp->timer); 5266 del_timer_sync(&tp->timer);
@@ -5277,10 +5333,10 @@ out:
5277 5333
5278static void tg3_dump_short_state(struct tg3 *tp) 5334static void tg3_dump_short_state(struct tg3 *tp)
5279{ 5335{
5280 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n", 5336 netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5281 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS)); 5337 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5282 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n", 5338 netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5283 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS)); 5339 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5284} 5340}
5285 5341
5286static void tg3_tx_timeout(struct net_device *dev) 5342static void tg3_tx_timeout(struct net_device *dev)
@@ -5288,8 +5344,7 @@ static void tg3_tx_timeout(struct net_device *dev)
5288 struct tg3 *tp = netdev_priv(dev); 5344 struct tg3 *tp = netdev_priv(dev);
5289 5345
5290 if (netif_msg_tx_err(tp)) { 5346 if (netif_msg_tx_err(tp)) {
5291 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n", 5347 netdev_err(dev, "transmit timed out, resetting\n");
5292 dev->name);
5293 tg3_dump_short_state(tp); 5348 tg3_dump_short_state(tp);
5294 } 5349 }
5295 5350
@@ -5453,8 +5508,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5453 netif_tx_stop_queue(txq); 5508 netif_tx_stop_queue(txq);
5454 5509
5455 /* This is a hard error, log it. */ 5510 /* This is a hard error, log it. */
5456 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " 5511 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
5457 "queue awake!\n", dev->name);
5458 } 5512 }
5459 return NETDEV_TX_BUSY; 5513 return NETDEV_TX_BUSY;
5460 } 5514 }
@@ -5657,8 +5711,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5657 netif_tx_stop_queue(txq); 5711 netif_tx_stop_queue(txq);
5658 5712
5659 /* This is a hard error, log it. */ 5713 /* This is a hard error, log it. */
5660 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " 5714 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
5661 "queue awake!\n", dev->name);
5662 } 5715 }
5663 return NETDEV_TX_BUSY; 5716 return NETDEV_TX_BUSY;
5664 } 5717 }
@@ -6005,11 +6058,8 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
6005 /* Now allocate fresh SKBs for each rx ring. */ 6058 /* Now allocate fresh SKBs for each rx ring. */
6006 for (i = 0; i < tp->rx_pending; i++) { 6059 for (i = 0; i < tp->rx_pending; i++) {
6007 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { 6060 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6008 printk(KERN_WARNING PFX 6061 netdev_warn(tp->dev, "Using a smaller RX standard ring, only %d out of %d buffers were allocated successfully\n",
6009 "%s: Using a smaller RX standard ring, " 6062 i, tp->rx_pending);
6010 "only %d out of %d buffers were allocated "
6011 "successfully.\n",
6012 tp->dev->name, i, tp->rx_pending);
6013 if (i == 0) 6063 if (i == 0)
6014 goto initfail; 6064 goto initfail;
6015 tp->rx_pending = i; 6065 tp->rx_pending = i;
@@ -6022,31 +6072,28 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
6022 6072
6023 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES); 6073 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
6024 6074
6025 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { 6075 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
6026 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { 6076 goto done;
6027 struct tg3_rx_buffer_desc *rxd;
6028 6077
6029 rxd = &tpr->rx_jmb[i].std; 6078 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
6030 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; 6079 struct tg3_rx_buffer_desc *rxd;
6031 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6032 RXD_FLAG_JUMBO;
6033 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6034 (i << RXD_OPAQUE_INDEX_SHIFT));
6035 }
6036 6080
6037 for (i = 0; i < tp->rx_jumbo_pending; i++) { 6081 rxd = &tpr->rx_jmb[i].std;
6038 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, 6082 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6039 i) < 0) { 6083 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6040 printk(KERN_WARNING PFX 6084 RXD_FLAG_JUMBO;
6041 "%s: Using a smaller RX jumbo ring, " 6085 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6042 "only %d out of %d buffers were " 6086 (i << RXD_OPAQUE_INDEX_SHIFT));
6043 "allocated successfully.\n", 6087 }
6044 tp->dev->name, i, tp->rx_jumbo_pending); 6088
6045 if (i == 0) 6089 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6046 goto initfail; 6090 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6047 tp->rx_jumbo_pending = i; 6091 netdev_warn(tp->dev, "Using a smaller RX jumbo ring, only %d out of %d buffers were allocated successfully\n",
6048 break; 6092 i, tp->rx_jumbo_pending);
6049 } 6093 if (i == 0)
6094 goto initfail;
6095 tp->rx_jumbo_pending = i;
6096 break;
6050 } 6097 }
6051 } 6098 }
6052 6099
@@ -6159,8 +6206,7 @@ static void tg3_free_rings(struct tg3 *tp)
6159 dev_kfree_skb_any(skb); 6206 dev_kfree_skb_any(skb);
6160 } 6207 }
6161 6208
6162 if (tp->irq_cnt == 1 || j != tp->irq_cnt - 1) 6209 tg3_rx_prodring_free(tp, &tp->prodring[j]);
6163 tg3_rx_prodring_free(tp, &tp->prodring[j]);
6164 } 6210 }
6165} 6211}
6166 6212
@@ -6196,9 +6242,10 @@ static int tg3_init_rings(struct tg3 *tp)
6196 if (tnapi->rx_rcb) 6242 if (tnapi->rx_rcb)
6197 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); 6243 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6198 6244
6199 if ((tp->irq_cnt == 1 || i != tp->irq_cnt - 1) && 6245 if (tg3_rx_prodring_alloc(tp, &tp->prodring[i])) {
6200 tg3_rx_prodring_alloc(tp, &tp->prodring[i])) 6246 tg3_free_rings(tp);
6201 return -ENOMEM; 6247 return -ENOMEM;
6248 }
6202 } 6249 }
6203 6250
6204 return 0; 6251 return 0;
@@ -6245,7 +6292,7 @@ static void tg3_free_consistent(struct tg3 *tp)
6245 tp->hw_stats = NULL; 6292 tp->hw_stats = NULL;
6246 } 6293 }
6247 6294
6248 for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) 6295 for (i = 0; i < tp->irq_cnt; i++)
6249 tg3_rx_prodring_fini(tp, &tp->prodring[i]); 6296 tg3_rx_prodring_fini(tp, &tp->prodring[i]);
6250} 6297}
6251 6298
@@ -6257,7 +6304,7 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6257{ 6304{
6258 int i; 6305 int i;
6259 6306
6260 for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) { 6307 for (i = 0; i < tp->irq_cnt; i++) {
6261 if (tg3_rx_prodring_init(tp, &tp->prodring[i])) 6308 if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
6262 goto err_out; 6309 goto err_out;
6263 } 6310 }
@@ -6322,10 +6369,7 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6322 break; 6369 break;
6323 } 6370 }
6324 6371
6325 if (tp->irq_cnt == 1) 6372 tnapi->prodring = &tp->prodring[i];
6326 tnapi->prodring = &tp->prodring[0];
6327 else if (i)
6328 tnapi->prodring = &tp->prodring[i - 1];
6329 6373
6330 /* 6374 /*
6331 * If multivector RSS is enabled, vector 0 does not handle 6375 * If multivector RSS is enabled, vector 0 does not handle
@@ -6389,8 +6433,7 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int
6389 } 6433 }
6390 6434
6391 if (i == MAX_WAIT_CNT && !silent) { 6435 if (i == MAX_WAIT_CNT && !silent) {
6392 printk(KERN_ERR PFX "tg3_stop_block timed out, " 6436 pr_err("tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6393 "ofs=%lx enable_bit=%x\n",
6394 ofs, enable_bit); 6437 ofs, enable_bit);
6395 return -ENODEV; 6438 return -ENODEV;
6396 } 6439 }
@@ -6437,9 +6480,8 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
6437 break; 6480 break;
6438 } 6481 }
6439 if (i >= MAX_WAIT_CNT) { 6482 if (i >= MAX_WAIT_CNT) {
6440 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, " 6483 netdev_err(tp->dev, "%s timed out, TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
6441 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n", 6484 __func__, tr32(MAC_TX_MODE));
6442 tp->dev->name, tr32(MAC_TX_MODE));
6443 err |= -ENODEV; 6485 err |= -ENODEV;
6444 } 6486 }
6445 6487
@@ -6660,8 +6702,14 @@ static int tg3_poll_fw(struct tg3 *tp)
6660 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) { 6702 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6661 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED; 6703 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6662 6704
6663 printk(KERN_INFO PFX "%s: No firmware running.\n", 6705 netdev_info(tp->dev, "No firmware running\n");
6664 tp->dev->name); 6706 }
6707
6708 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6709 /* The 57765 A0 needs a little more
6710 * time to do some important work.
6711 */
6712 mdelay(10);
6665 } 6713 }
6666 6714
6667 return 0; 6715 return 0;
@@ -7082,10 +7130,8 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7082 } 7130 }
7083 7131
7084 if (i >= 10000) { 7132 if (i >= 10000) {
7085 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, " 7133 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7086 "and %s CPU\n", 7134 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7087 tp->dev->name,
7088 (offset == RX_CPU_BASE ? "RX" : "TX"));
7089 return -ENODEV; 7135 return -ENODEV;
7090 } 7136 }
7091 7137
@@ -7110,9 +7156,8 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
7110 7156
7111 if (cpu_base == TX_CPU_BASE && 7157 if (cpu_base == TX_CPU_BASE &&
7112 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 7158 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7113 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load " 7159 netdev_err(tp->dev, "%s: Trying to load TX cpu firmware which is 5705\n",
7114 "TX cpu firmware on %s which is 5705.\n", 7160 __func__);
7115 tp->dev->name);
7116 return -EINVAL; 7161 return -EINVAL;
7117 } 7162 }
7118 7163
@@ -7191,10 +7236,8 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7191 udelay(1000); 7236 udelay(1000);
7192 } 7237 }
7193 if (i >= 5) { 7238 if (i >= 5) {
7194 printk(KERN_ERR PFX "tg3_load_firmware fails for %s " 7239 netdev_err(tp->dev, "tg3_load_firmware fails to set RX CPU PC, is %08x should be %08x\n",
7195 "to set RX CPU PC, is %08x should be %08x\n", 7240 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7196 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
7197 info.fw_base);
7198 return -ENODEV; 7241 return -ENODEV;
7199 } 7242 }
7200 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); 7243 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
@@ -7257,10 +7300,8 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
7257 udelay(1000); 7300 udelay(1000);
7258 } 7301 }
7259 if (i >= 5) { 7302 if (i >= 5) {
7260 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s " 7303 netdev_err(tp->dev, "%s fails to set CPU PC, is %08x should be %08x\n",
7261 "to set CPU PC, is %08x should be %08x\n", 7304 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7262 tp->dev->name, tr32(cpu_base + CPU_PC),
7263 info.fw_base);
7264 return -ENODEV; 7305 return -ENODEV;
7265 } 7306 }
7266 tw32(cpu_base + CPU_STATE, 0xffffffff); 7307 tw32(cpu_base + CPU_STATE, 0xffffffff);
@@ -7439,10 +7480,13 @@ static void tg3_rings_reset(struct tg3 *tp)
7439 for (i = 1; i < TG3_IRQ_MAX_VECS; i++) { 7480 for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
7440 tp->napi[i].tx_prod = 0; 7481 tp->napi[i].tx_prod = 0;
7441 tp->napi[i].tx_cons = 0; 7482 tp->napi[i].tx_cons = 0;
7442 tw32_mailbox(tp->napi[i].prodmbox, 0); 7483 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
7484 tw32_mailbox(tp->napi[i].prodmbox, 0);
7443 tw32_rx_mbox(tp->napi[i].consmbox, 0); 7485 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7444 tw32_mailbox_f(tp->napi[i].int_mbox, 1); 7486 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7445 } 7487 }
7488 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
7489 tw32_mailbox(tp->napi[0].prodmbox, 0);
7446 } else { 7490 } else {
7447 tp->napi[0].tx_prod = 0; 7491 tp->napi[0].tx_prod = 0;
7448 tp->napi[0].tx_cons = 0; 7492 tp->napi[0].tx_cons = 0;
@@ -7528,8 +7572,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7528 tg3_abort_hw(tp, 1); 7572 tg3_abort_hw(tp, 1);
7529 } 7573 }
7530 7574
7531 if (reset_phy && 7575 if (reset_phy)
7532 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7533 tg3_phy_reset(tp); 7576 tg3_phy_reset(tp);
7534 7577
7535 err = tg3_chip_reset(tp); 7578 err = tg3_chip_reset(tp);
@@ -7574,6 +7617,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7574 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); 7617 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7575 } 7618 }
7576 7619
7620 if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
7621 u32 grc_mode = tr32(GRC_MODE);
7622
7623 /* Access the lower 1K of PL PCIE block registers. */
7624 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7625 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7626
7627 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7628 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7629 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7630
7631 tw32(GRC_MODE, grc_mode);
7632 }
7633
7577 /* This works around an issue with Athlon chipsets on 7634 /* This works around an issue with Athlon chipsets on
7578 * B3 tigon3 silicon. This bit has no effect on any 7635 * B3 tigon3 silicon. This bit has no effect on any
7579 * other revision. But do not set this on PCI Express 7636 * other revision. But do not set this on PCI Express
@@ -7705,8 +7762,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7705 udelay(10); 7762 udelay(10);
7706 } 7763 }
7707 if (i >= 2000) { 7764 if (i >= 2000) {
7708 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n", 7765 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
7709 tp->dev->name);
7710 return -ENODEV; 7766 return -ENODEV;
7711 } 7767 }
7712 7768
@@ -7772,7 +7828,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7772 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 7828 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7773 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | 7829 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7774 BDINFO_FLAGS_USE_EXT_RECV); 7830 BDINFO_FLAGS_USE_EXT_RECV);
7775 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 7831 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7776 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 7832 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7777 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 7833 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7778 } else { 7834 } else {
@@ -7834,6 +7890,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7834 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | 7890 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7835 RDMAC_MODE_LNGREAD_ENAB); 7891 RDMAC_MODE_LNGREAD_ENAB);
7836 7892
7893 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
7894 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
7895
7837 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 7896 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7838 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 7897 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7839 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 7898 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
@@ -8143,7 +8202,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8143 /* Prevent chip from dropping frames when flow control 8202 /* Prevent chip from dropping frames when flow control
8144 * is enabled. 8203 * is enabled.
8145 */ 8204 */
8146 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2); 8205 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8206 val = 1;
8207 else
8208 val = 2;
8209 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8147 8210
8148 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && 8211 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8149 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { 8212 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
@@ -8562,10 +8625,8 @@ static int tg3_test_msi(struct tg3 *tp)
8562 return err; 8625 return err;
8563 8626
8564 /* MSI test failed, go back to INTx mode */ 8627 /* MSI test failed, go back to INTx mode */
8565 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, " 8628 netdev_warn(tp->dev, "No interrupt was generated using MSI, switching to INTx mode\n"
8566 "switching to INTx mode. Please report this failure to " 8629 "Please report this failure to the PCI maintainer and include system chipset information\n");
8567 "the PCI maintainer and include system chipset information.\n",
8568 tp->dev->name);
8569 8630
8570 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); 8631 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8571 8632
@@ -8598,8 +8659,8 @@ static int tg3_request_firmware(struct tg3 *tp)
8598 const __be32 *fw_data; 8659 const __be32 *fw_data;
8599 8660
8600 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { 8661 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
8601 printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n", 8662 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
8602 tp->dev->name, tp->fw_needed); 8663 tp->fw_needed);
8603 return -ENOENT; 8664 return -ENOENT;
8604 } 8665 }
8605 8666
@@ -8612,8 +8673,8 @@ static int tg3_request_firmware(struct tg3 *tp)
8612 8673
8613 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */ 8674 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
8614 if (tp->fw_len < (tp->fw->size - 12)) { 8675 if (tp->fw_len < (tp->fw->size - 12)) {
8615 printk(KERN_ERR "%s: bogus length %d in \"%s\"\n", 8676 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
8616 tp->dev->name, tp->fw_len, tp->fw_needed); 8677 tp->fw_len, tp->fw_needed);
8617 release_firmware(tp->fw); 8678 release_firmware(tp->fw);
8618 tp->fw = NULL; 8679 tp->fw = NULL;
8619 return -EINVAL; 8680 return -EINVAL;
@@ -8651,9 +8712,8 @@ static bool tg3_enable_msix(struct tg3 *tp)
8651 return false; 8712 return false;
8652 if (pci_enable_msix(tp->pdev, msix_ent, rc)) 8713 if (pci_enable_msix(tp->pdev, msix_ent, rc))
8653 return false; 8714 return false;
8654 printk(KERN_NOTICE 8715 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
8655 "%s: Requested %d MSI-X vectors, received %d\n", 8716 tp->irq_cnt, rc);
8656 tp->dev->name, tp->irq_cnt, rc);
8657 tp->irq_cnt = rc; 8717 tp->irq_cnt = rc;
8658 } 8718 }
8659 8719
@@ -8678,8 +8738,7 @@ static void tg3_ints_init(struct tg3 *tp)
8678 /* All MSI supporting chips should support tagged 8738 /* All MSI supporting chips should support tagged
8679 * status. Assert that this is the case. 8739 * status. Assert that this is the case.
8680 */ 8740 */
8681 printk(KERN_WARNING PFX "%s: MSI without TAGGED? " 8741 netdev_warn(tp->dev, "MSI without TAGGED? Not using MSI\n");
8682 "Not using MSI.\n", tp->dev->name);
8683 goto defcfg; 8742 goto defcfg;
8684 } 8743 }
8685 8744
@@ -8724,12 +8783,10 @@ static int tg3_open(struct net_device *dev)
8724 if (err) 8783 if (err)
8725 return err; 8784 return err;
8726 } else if (err) { 8785 } else if (err) {
8727 printk(KERN_WARNING "%s: TSO capability disabled.\n", 8786 netdev_warn(tp->dev, "TSO capability disabled\n");
8728 tp->dev->name);
8729 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; 8787 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8730 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { 8788 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8731 printk(KERN_NOTICE "%s: TSO capability restored.\n", 8789 netdev_notice(tp->dev, "TSO capability restored\n");
8732 tp->dev->name);
8733 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 8790 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8734 } 8791 }
8735 } 8792 }
@@ -9395,21 +9452,18 @@ static void __tg3_set_rx_mode(struct net_device *dev)
9395 } else if (dev->flags & IFF_ALLMULTI) { 9452 } else if (dev->flags & IFF_ALLMULTI) {
9396 /* Accept all multicast. */ 9453 /* Accept all multicast. */
9397 tg3_set_multi (tp, 1); 9454 tg3_set_multi (tp, 1);
9398 } else if (dev->mc_count < 1) { 9455 } else if (netdev_mc_empty(dev)) {
9399 /* Reject all multicast. */ 9456 /* Reject all multicast. */
9400 tg3_set_multi (tp, 0); 9457 tg3_set_multi (tp, 0);
9401 } else { 9458 } else {
9402 /* Accept one or more multicast(s). */ 9459 /* Accept one or more multicast(s). */
9403 struct dev_mc_list *mclist; 9460 struct dev_mc_list *mclist;
9404 unsigned int i;
9405 u32 mc_filter[4] = { 0, }; 9461 u32 mc_filter[4] = { 0, };
9406 u32 regidx; 9462 u32 regidx;
9407 u32 bit; 9463 u32 bit;
9408 u32 crc; 9464 u32 crc;
9409 9465
9410 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 9466 netdev_for_each_mc_addr(mclist, dev) {
9411 i++, mclist = mclist->next) {
9412
9413 crc = calc_crc (mclist->dmi_addr, ETH_ALEN); 9467 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
9414 bit = ~crc & 0x7f; 9468 bit = ~crc & 0x7f;
9415 regidx = (bit & 0x60) >> 5; 9469 regidx = (bit & 0x60) >> 5;
@@ -10001,56 +10055,66 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
10001 int err = 0; 10055 int err = 0;
10002 10056
10003 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 10057 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10004 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 10058 u32 newadv;
10005 return -EAGAIN; 10059 struct phy_device *phydev;
10006 10060
10007 if (epause->autoneg) { 10061 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10008 u32 newadv;
10009 struct phy_device *phydev;
10010 10062
10011 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 10063 if (!(phydev->supported & SUPPORTED_Pause) ||
10064 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10065 ((epause->rx_pause && !epause->tx_pause) ||
10066 (!epause->rx_pause && epause->tx_pause))))
10067 return -EINVAL;
10012 10068
10013 if (epause->rx_pause) { 10069 tp->link_config.flowctrl = 0;
10014 if (epause->tx_pause) 10070 if (epause->rx_pause) {
10015 newadv = ADVERTISED_Pause; 10071 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10016 else 10072
10017 newadv = ADVERTISED_Pause | 10073 if (epause->tx_pause) {
10018 ADVERTISED_Asym_Pause; 10074 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10019 } else if (epause->tx_pause) { 10075 newadv = ADVERTISED_Pause;
10020 newadv = ADVERTISED_Asym_Pause;
10021 } else 10076 } else
10022 newadv = 0; 10077 newadv = ADVERTISED_Pause |
10023 10078 ADVERTISED_Asym_Pause;
10024 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { 10079 } else if (epause->tx_pause) {
10025 u32 oldadv = phydev->advertising & 10080 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10026 (ADVERTISED_Pause | 10081 newadv = ADVERTISED_Asym_Pause;
10027 ADVERTISED_Asym_Pause); 10082 } else
10028 if (oldadv != newadv) { 10083 newadv = 0;
10029 phydev->advertising &= 10084
10030 ~(ADVERTISED_Pause | 10085 if (epause->autoneg)
10031 ADVERTISED_Asym_Pause); 10086 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10032 phydev->advertising |= newadv; 10087 else
10033 err = phy_start_aneg(phydev); 10088 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
10089
10090 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
10091 u32 oldadv = phydev->advertising &
10092 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10093 if (oldadv != newadv) {
10094 phydev->advertising &=
10095 ~(ADVERTISED_Pause |
10096 ADVERTISED_Asym_Pause);
10097 phydev->advertising |= newadv;
10098 if (phydev->autoneg) {
10099 /*
10100 * Always renegotiate the link to
10101 * inform our link partner of our
10102 * flow control settings, even if the
10103 * flow control is forced. Let
10104 * tg3_adjust_link() do the final
10105 * flow control setup.
10106 */
10107 return phy_start_aneg(phydev);
10034 } 10108 }
10035 } else {
10036 tp->link_config.advertising &=
10037 ~(ADVERTISED_Pause |
10038 ADVERTISED_Asym_Pause);
10039 tp->link_config.advertising |= newadv;
10040 } 10109 }
10041 } else {
10042 if (epause->rx_pause)
10043 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10044 else
10045 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10046
10047 if (epause->tx_pause)
10048 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10049 else
10050 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10051 10110
10052 if (netif_running(dev)) 10111 if (!epause->autoneg)
10053 tg3_setup_flow_control(tp, 0, 0); 10112 tg3_setup_flow_control(tp, 0, 0);
10113 } else {
10114 tp->link_config.orig_advertising &=
10115 ~(ADVERTISED_Pause |
10116 ADVERTISED_Asym_Pause);
10117 tp->link_config.orig_advertising |= newadv;
10054 } 10118 }
10055 } else { 10119 } else {
10056 int irq_sync = 0; 10120 int irq_sync = 0;
@@ -10584,8 +10648,7 @@ static int tg3_test_registers(struct tg3 *tp)
10584 10648
10585out: 10649out:
10586 if (netif_msg_hw(tp)) 10650 if (netif_msg_hw(tp))
10587 printk(KERN_ERR PFX "Register test failed at offset %x\n", 10651 pr_err("Register test failed at offset %x\n", offset);
10588 offset);
10589 tw32(offset, save_val); 10652 tw32(offset, save_val);
10590 return -EIO; 10653 return -EIO;
10591} 10654}
@@ -10640,12 +10703,27 @@ static int tg3_test_memory(struct tg3 *tp)
10640 { 0x00008000, 0x01000}, 10703 { 0x00008000, 0x01000},
10641 { 0x00010000, 0x01000}, 10704 { 0x00010000, 0x01000},
10642 { 0xffffffff, 0x00000} 10705 { 0xffffffff, 0x00000}
10706 }, mem_tbl_5717[] = {
10707 { 0x00000200, 0x00008},
10708 { 0x00010000, 0x0a000},
10709 { 0x00020000, 0x13c00},
10710 { 0xffffffff, 0x00000}
10711 }, mem_tbl_57765[] = {
10712 { 0x00000200, 0x00008},
10713 { 0x00004000, 0x00800},
10714 { 0x00006000, 0x09800},
10715 { 0x00010000, 0x0a000},
10716 { 0xffffffff, 0x00000}
10643 }; 10717 };
10644 struct mem_entry *mem_tbl; 10718 struct mem_entry *mem_tbl;
10645 int err = 0; 10719 int err = 0;
10646 int i; 10720 int i;
10647 10721
10648 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 10722 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
10723 mem_tbl = mem_tbl_5717;
10724 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10725 mem_tbl = mem_tbl_57765;
10726 else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10649 mem_tbl = mem_tbl_5755; 10727 mem_tbl = mem_tbl_5755;
10650 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 10728 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10651 mem_tbl = mem_tbl_5906; 10729 mem_tbl = mem_tbl_5906;
@@ -10678,12 +10756,12 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10678 struct tg3_napi *tnapi, *rnapi; 10756 struct tg3_napi *tnapi, *rnapi;
10679 struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; 10757 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
10680 10758
10759 tnapi = &tp->napi[0];
10760 rnapi = &tp->napi[0];
10681 if (tp->irq_cnt > 1) { 10761 if (tp->irq_cnt > 1) {
10682 tnapi = &tp->napi[1];
10683 rnapi = &tp->napi[1]; 10762 rnapi = &tp->napi[1];
10684 } else { 10763 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
10685 tnapi = &tp->napi[0]; 10764 tnapi = &tp->napi[1];
10686 rnapi = &tp->napi[0];
10687 } 10765 }
10688 coal_now = tnapi->coal_now | rnapi->coal_now; 10766 coal_now = tnapi->coal_now | rnapi->coal_now;
10689 10767
@@ -10720,8 +10798,12 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10720 10798
10721 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; 10799 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10722 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { 10800 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10723 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 10801 tg3_writephy(tp, MII_TG3_FET_PTEST,
10724 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800); 10802 MII_TG3_FET_PTEST_FRC_TX_LINK |
10803 MII_TG3_FET_PTEST_FRC_TX_LOCK);
10804 /* The write needs to be flushed for the AC131 */
10805 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10806 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
10725 mac_mode |= MAC_MODE_PORT_MODE_MII; 10807 mac_mode |= MAC_MODE_PORT_MODE_MII;
10726 } else 10808 } else
10727 mac_mode |= MAC_MODE_PORT_MODE_GMII; 10809 mac_mode |= MAC_MODE_PORT_MODE_GMII;
@@ -10733,9 +10815,10 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10733 tw32_f(MAC_RX_MODE, tp->rx_mode); 10815 tw32_f(MAC_RX_MODE, tp->rx_mode);
10734 } 10816 }
10735 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { 10817 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10736 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) 10818 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
10819 if (masked_phy_id == TG3_PHY_ID_BCM5401)
10737 mac_mode &= ~MAC_MODE_LINK_POLARITY; 10820 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10738 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) 10821 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
10739 mac_mode |= MAC_MODE_LINK_POLARITY; 10822 mac_mode |= MAC_MODE_LINK_POLARITY;
10740 tg3_writephy(tp, MII_TG3_EXT_CTRL, 10823 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10741 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 10824 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
@@ -11692,8 +11775,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
11692 tp->tg3_flags |= TG3_FLAG_NVRAM; 11775 tp->tg3_flags |= TG3_FLAG_NVRAM;
11693 11776
11694 if (tg3_nvram_lock(tp)) { 11777 if (tg3_nvram_lock(tp)) {
11695 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, " 11778 netdev_warn(tp->dev, "Cannot get nvram lock, %s failed\n",
11696 "tg3_nvram_init failed.\n", tp->dev->name); 11779 __func__);
11697 return; 11780 return;
11698 } 11781 }
11699 tg3_enable_nvram_access(tp); 11782 tg3_enable_nvram_access(tp);
@@ -11991,45 +12074,71 @@ struct subsys_tbl_ent {
11991 u32 phy_id; 12074 u32 phy_id;
11992}; 12075};
11993 12076
11994static struct subsys_tbl_ent subsys_id_to_phy_id[] = { 12077static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
11995 /* Broadcom boards. */ 12078 /* Broadcom boards. */
11996 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */ 12079 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11997 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */ 12080 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
11998 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */ 12081 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11999 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */ 12082 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12000 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */ 12083 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12001 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */ 12084 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12002 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */ 12085 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12003 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */ 12086 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12004 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */ 12087 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12005 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */ 12088 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12006 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */ 12089 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12090 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12091 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12092 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12093 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12094 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12095 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12096 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12097 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12098 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12099 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12100 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12007 12101
12008 /* 3com boards. */ 12102 /* 3com boards. */
12009 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */ 12103 { TG3PCI_SUBVENDOR_ID_3COM,
12010 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */ 12104 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12011 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */ 12105 { TG3PCI_SUBVENDOR_ID_3COM,
12012 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */ 12106 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12013 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */ 12107 { TG3PCI_SUBVENDOR_ID_3COM,
12108 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12109 { TG3PCI_SUBVENDOR_ID_3COM,
12110 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12111 { TG3PCI_SUBVENDOR_ID_3COM,
12112 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12014 12113
12015 /* DELL boards. */ 12114 /* DELL boards. */
12016 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */ 12115 { TG3PCI_SUBVENDOR_ID_DELL,
12017 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */ 12116 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12018 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */ 12117 { TG3PCI_SUBVENDOR_ID_DELL,
12019 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */ 12118 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12119 { TG3PCI_SUBVENDOR_ID_DELL,
12120 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12121 { TG3PCI_SUBVENDOR_ID_DELL,
12122 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12020 12123
12021 /* Compaq boards. */ 12124 /* Compaq boards. */
12022 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */ 12125 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12023 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */ 12126 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12024 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */ 12127 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12025 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */ 12128 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12026 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */ 12129 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12130 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12131 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12132 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12133 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12134 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12027 12135
12028 /* IBM boards. */ 12136 /* IBM boards. */
12029 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */ 12137 { TG3PCI_SUBVENDOR_ID_IBM,
12138 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12030}; 12139};
12031 12140
12032static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp) 12141static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12033{ 12142{
12034 int i; 12143 int i;
12035 12144
@@ -12070,7 +12179,7 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12070 val = tr32(MEMARB_MODE); 12179 val = tr32(MEMARB_MODE);
12071 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 12180 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12072 12181
12073 tp->phy_id = PHY_ID_INVALID; 12182 tp->phy_id = TG3_PHY_ID_INVALID;
12074 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 12183 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12075 12184
12076 /* Assume an onboard device and WOL capable by default. */ 12185 /* Assume an onboard device and WOL capable by default. */
@@ -12244,8 +12353,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12244 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; 12353 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12245 } 12354 }
12246 12355
12247 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE) 12356 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12248 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE; 12357 tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE;
12249 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) 12358 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12250 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN; 12359 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
12251 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) 12360 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
@@ -12321,7 +12430,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
12321 err = 0; 12430 err = 0;
12322 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || 12431 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12323 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { 12432 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
12324 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID; 12433 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12325 } else { 12434 } else {
12326 /* Now read the physical PHY_ID from the chip and verify 12435 /* Now read the physical PHY_ID from the chip and verify
12327 * that it is sane. If it doesn't look good, we fall back 12436 * that it is sane. If it doesn't look good, we fall back
@@ -12335,17 +12444,17 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
12335 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16; 12444 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12336 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0; 12445 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
12337 12446
12338 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK; 12447 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12339 } 12448 }
12340 12449
12341 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) { 12450 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12342 tp->phy_id = hw_phy_id; 12451 tp->phy_id = hw_phy_id;
12343 if (hw_phy_id_masked == PHY_ID_BCM8002) 12452 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12344 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; 12453 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12345 else 12454 else
12346 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES; 12455 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
12347 } else { 12456 } else {
12348 if (tp->phy_id != PHY_ID_INVALID) { 12457 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12349 /* Do nothing, phy ID already set up in 12458 /* Do nothing, phy ID already set up in
12350 * tg3_get_eeprom_hw_cfg(). 12459 * tg3_get_eeprom_hw_cfg().
12351 */ 12460 */
@@ -12355,13 +12464,13 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
12355 /* No eeprom signature? Try the hardcoded 12464 /* No eeprom signature? Try the hardcoded
12356 * subsys device table. 12465 * subsys device table.
12357 */ 12466 */
12358 p = lookup_by_subsys(tp); 12467 p = tg3_lookup_by_subsys(tp);
12359 if (!p) 12468 if (!p)
12360 return -ENODEV; 12469 return -ENODEV;
12361 12470
12362 tp->phy_id = p->phy_id; 12471 tp->phy_id = p->phy_id;
12363 if (!tp->phy_id || 12472 if (!tp->phy_id ||
12364 tp->phy_id == PHY_ID_BCM8002) 12473 tp->phy_id == TG3_PHY_ID_BCM8002)
12365 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; 12474 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12366 } 12475 }
12367 } 12476 }
@@ -12413,13 +12522,11 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
12413 } 12522 }
12414 12523
12415skip_phy_reset: 12524skip_phy_reset:
12416 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { 12525 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
12417 err = tg3_init_5401phy_dsp(tp); 12526 err = tg3_init_5401phy_dsp(tp);
12418 if (err) 12527 if (err)
12419 return err; 12528 return err;
12420 }
12421 12529
12422 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
12423 err = tg3_init_5401phy_dsp(tp); 12530 err = tg3_init_5401phy_dsp(tp);
12424 } 12531 }
12425 12532
@@ -12440,7 +12547,8 @@ skip_phy_reset:
12440static void __devinit tg3_read_partno(struct tg3 *tp) 12547static void __devinit tg3_read_partno(struct tg3 *tp)
12441{ 12548{
12442 unsigned char vpd_data[TG3_NVM_VPD_LEN]; /* in little-endian format */ 12549 unsigned char vpd_data[TG3_NVM_VPD_LEN]; /* in little-endian format */
12443 unsigned int i; 12550 unsigned int block_end, rosize, len;
12551 int i = 0;
12444 u32 magic; 12552 u32 magic;
12445 12553
12446 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || 12554 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
@@ -12462,7 +12570,7 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
12462 } 12570 }
12463 } else { 12571 } else {
12464 ssize_t cnt; 12572 ssize_t cnt;
12465 unsigned int pos = 0, i = 0; 12573 unsigned int pos = 0;
12466 12574
12467 for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) { 12575 for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
12468 cnt = pci_read_vpd(tp->pdev, pos, 12576 cnt = pci_read_vpd(tp->pdev, pos,
@@ -12477,51 +12585,33 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
12477 goto out_not_found; 12585 goto out_not_found;
12478 } 12586 }
12479 12587
12480 /* Now parse and find the part number. */ 12588 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
12481 for (i = 0; i < TG3_NVM_VPD_LEN - 2; ) { 12589 PCI_VPD_LRDT_RO_DATA);
12482 unsigned char val = vpd_data[i]; 12590 if (i < 0)
12483 unsigned int block_end; 12591 goto out_not_found;
12484
12485 if (val == 0x82 || val == 0x91) {
12486 i = (i + 3 +
12487 (vpd_data[i + 1] +
12488 (vpd_data[i + 2] << 8)));
12489 continue;
12490 }
12491
12492 if (val != 0x90)
12493 goto out_not_found;
12494 12592
12495 block_end = (i + 3 + 12593 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
12496 (vpd_data[i + 1] + 12594 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
12497 (vpd_data[i + 2] << 8))); 12595 i += PCI_VPD_LRDT_TAG_SIZE;
12498 i += 3;
12499 12596
12500 if (block_end > TG3_NVM_VPD_LEN) 12597 if (block_end > TG3_NVM_VPD_LEN)
12501 goto out_not_found; 12598 goto out_not_found;
12502 12599
12503 while (i < (block_end - 2)) { 12600 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12504 if (vpd_data[i + 0] == 'P' && 12601 PCI_VPD_RO_KEYWORD_PARTNO);
12505 vpd_data[i + 1] == 'N') { 12602 if (i < 0)
12506 int partno_len = vpd_data[i + 2]; 12603 goto out_not_found;
12507 12604
12508 i += 3; 12605 len = pci_vpd_info_field_size(&vpd_data[i]);
12509 if (partno_len > TG3_BPN_SIZE ||
12510 (partno_len + i) > TG3_NVM_VPD_LEN)
12511 goto out_not_found;
12512 12606
12513 memcpy(tp->board_part_number, 12607 i += PCI_VPD_INFO_FLD_HDR_SIZE;
12514 &vpd_data[i], partno_len); 12608 if (len > TG3_BPN_SIZE ||
12609 (len + i) > TG3_NVM_VPD_LEN)
12610 goto out_not_found;
12515 12611
12516 /* Success. */ 12612 memcpy(tp->board_part_number, &vpd_data[i], len);
12517 return;
12518 }
12519 i += 3 + vpd_data[i + 2];
12520 }
12521 12613
12522 /* Part number not found. */ 12614 return;
12523 goto out_not_found;
12524 }
12525 12615
12526out_not_found: 12616out_not_found:
12527 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 12617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
@@ -12538,8 +12628,24 @@ out_not_found:
12538 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && 12628 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12539 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) 12629 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
12540 strcpy(tp->board_part_number, "BCM57788"); 12630 strcpy(tp->board_part_number, "BCM57788");
12541 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 12631 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12632 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
12633 strcpy(tp->board_part_number, "BCM57761");
12634 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12635 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
12542 strcpy(tp->board_part_number, "BCM57765"); 12636 strcpy(tp->board_part_number, "BCM57765");
12637 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12638 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
12639 strcpy(tp->board_part_number, "BCM57781");
12640 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12641 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
12642 strcpy(tp->board_part_number, "BCM57785");
12643 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12644 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
12645 strcpy(tp->board_part_number, "BCM57791");
12646 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12647 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12648 strcpy(tp->board_part_number, "BCM57795");
12543 else 12649 else
12544 strcpy(tp->board_part_number, "none"); 12650 strcpy(tp->board_part_number, "none");
12545} 12651}
@@ -12642,6 +12748,12 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12642 case TG3_EEPROM_SB_REVISION_3: 12748 case TG3_EEPROM_SB_REVISION_3:
12643 offset = TG3_EEPROM_SB_F1R3_EDH_OFF; 12749 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
12644 break; 12750 break;
12751 case TG3_EEPROM_SB_REVISION_4:
12752 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
12753 break;
12754 case TG3_EEPROM_SB_REVISION_5:
12755 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
12756 break;
12645 default: 12757 default:
12646 return; 12758 return;
12647 } 12759 }
@@ -13102,6 +13214,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13102 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 || 13214 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13103 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1) 13215 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13104 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG; 13216 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
13217 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13218 tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
13105 } 13219 }
13106 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { 13220 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13107 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; 13221 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
@@ -13109,8 +13223,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13109 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { 13223 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13110 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); 13224 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13111 if (!tp->pcix_cap) { 13225 if (!tp->pcix_cap) {
13112 printk(KERN_ERR PFX "Cannot find PCI-X " 13226 pr_err("Cannot find PCI-X capability, aborting\n");
13113 "capability, aborting.\n");
13114 return -EIO; 13227 return -EIO;
13115 } 13228 }
13116 13229
@@ -13290,7 +13403,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13290 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 13403 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13291 13404
13292 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 13405 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13293 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 13406 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13407 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13294 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 13408 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13295 13409
13296 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 13410 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
@@ -13306,8 +13420,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13306 /* Force the chip into D0. */ 13420 /* Force the chip into D0. */
13307 err = tg3_set_power_state(tp, PCI_D0); 13421 err = tg3_set_power_state(tp, PCI_D0);
13308 if (err) { 13422 if (err) {
13309 printk(KERN_ERR PFX "(%s) transition to D0 failed\n", 13423 pr_err("(%s) transition to D0 failed\n", pci_name(tp->pdev));
13310 pci_name(tp->pdev));
13311 return err; 13424 return err;
13312 } 13425 }
13313 13426
@@ -13474,12 +13587,14 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13474 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F || 13587 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
13475 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) || 13588 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
13476 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 || 13589 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
13590 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13591 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13477 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) 13592 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
13478 tp->tg3_flags |= TG3_FLAG_10_100_ONLY; 13593 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
13479 13594
13480 err = tg3_phy_probe(tp); 13595 err = tg3_phy_probe(tp);
13481 if (err) { 13596 if (err) {
13482 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n", 13597 pr_err("(%s) phy probe failed, err %d\n",
13483 pci_name(tp->pdev), err); 13598 pci_name(tp->pdev), err);
13484 /* ... but do not return immediately ... */ 13599 /* ... but do not return immediately ... */
13485 tg3_mdio_fini(tp); 13600 tg3_mdio_fini(tp);
@@ -13989,7 +14104,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
13989 /* Send the buffer to the chip. */ 14104 /* Send the buffer to the chip. */
13990 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1); 14105 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13991 if (ret) { 14106 if (ret) {
13992 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret); 14107 pr_err("tg3_test_dma() Write the buffer failed %d\n",
14108 ret);
13993 break; 14109 break;
13994 } 14110 }
13995 14111
@@ -13999,7 +14115,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
13999 u32 val; 14115 u32 val;
14000 tg3_read_mem(tp, 0x2100 + (i*4), &val); 14116 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14001 if (le32_to_cpu(val) != p[i]) { 14117 if (le32_to_cpu(val) != p[i]) {
14002 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i); 14118 pr_err(" tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n",
14119 val, i);
14003 /* ret = -ENODEV here? */ 14120 /* ret = -ENODEV here? */
14004 } 14121 }
14005 p[i] = 0; 14122 p[i] = 0;
@@ -14008,7 +14125,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14008 /* Now read it back. */ 14125 /* Now read it back. */
14009 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0); 14126 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14010 if (ret) { 14127 if (ret) {
14011 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret); 14128 pr_err("tg3_test_dma() Read the buffer failed %d\n",
14129 ret);
14012 14130
14013 break; 14131 break;
14014 } 14132 }
@@ -14025,7 +14143,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14025 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 14143 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14026 break; 14144 break;
14027 } else { 14145 } else {
14028 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i); 14146 pr_err("tg3_test_dma() buffer corrupted on read back! (%d != %d)\n",
14147 p[i], i);
14029 ret = -ENODEV; 14148 ret = -ENODEV;
14030 goto out; 14149 goto out;
14031 } 14150 }
@@ -14086,9 +14205,22 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
14086 14205
14087static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) 14206static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14088{ 14207{
14089 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS && 14208 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14090 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && 14209 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14091 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) { 14210 tp->bufmgr_config.mbuf_read_dma_low_water =
14211 DEFAULT_MB_RDMA_LOW_WATER_5705;
14212 tp->bufmgr_config.mbuf_mac_rx_low_water =
14213 DEFAULT_MB_MACRX_LOW_WATER_57765;
14214 tp->bufmgr_config.mbuf_high_water =
14215 DEFAULT_MB_HIGH_WATER_57765;
14216
14217 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14218 DEFAULT_MB_RDMA_LOW_WATER_5705;
14219 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14220 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14221 tp->bufmgr_config.mbuf_high_water_jumbo =
14222 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14223 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14092 tp->bufmgr_config.mbuf_read_dma_low_water = 14224 tp->bufmgr_config.mbuf_read_dma_low_water =
14093 DEFAULT_MB_RDMA_LOW_WATER_5705; 14225 DEFAULT_MB_RDMA_LOW_WATER_5705;
14094 tp->bufmgr_config.mbuf_mac_rx_low_water = 14226 tp->bufmgr_config.mbuf_mac_rx_low_water =
@@ -14130,26 +14262,28 @@ static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14130 14262
14131static char * __devinit tg3_phy_string(struct tg3 *tp) 14263static char * __devinit tg3_phy_string(struct tg3 *tp)
14132{ 14264{
14133 switch (tp->phy_id & PHY_ID_MASK) { 14265 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14134 case PHY_ID_BCM5400: return "5400"; 14266 case TG3_PHY_ID_BCM5400: return "5400";
14135 case PHY_ID_BCM5401: return "5401"; 14267 case TG3_PHY_ID_BCM5401: return "5401";
14136 case PHY_ID_BCM5411: return "5411"; 14268 case TG3_PHY_ID_BCM5411: return "5411";
14137 case PHY_ID_BCM5701: return "5701"; 14269 case TG3_PHY_ID_BCM5701: return "5701";
14138 case PHY_ID_BCM5703: return "5703"; 14270 case TG3_PHY_ID_BCM5703: return "5703";
14139 case PHY_ID_BCM5704: return "5704"; 14271 case TG3_PHY_ID_BCM5704: return "5704";
14140 case PHY_ID_BCM5705: return "5705"; 14272 case TG3_PHY_ID_BCM5705: return "5705";
14141 case PHY_ID_BCM5750: return "5750"; 14273 case TG3_PHY_ID_BCM5750: return "5750";
14142 case PHY_ID_BCM5752: return "5752"; 14274 case TG3_PHY_ID_BCM5752: return "5752";
14143 case PHY_ID_BCM5714: return "5714"; 14275 case TG3_PHY_ID_BCM5714: return "5714";
14144 case PHY_ID_BCM5780: return "5780"; 14276 case TG3_PHY_ID_BCM5780: return "5780";
14145 case PHY_ID_BCM5755: return "5755"; 14277 case TG3_PHY_ID_BCM5755: return "5755";
14146 case PHY_ID_BCM5787: return "5787"; 14278 case TG3_PHY_ID_BCM5787: return "5787";
14147 case PHY_ID_BCM5784: return "5784"; 14279 case TG3_PHY_ID_BCM5784: return "5784";
14148 case PHY_ID_BCM5756: return "5722/5756"; 14280 case TG3_PHY_ID_BCM5756: return "5722/5756";
14149 case PHY_ID_BCM5906: return "5906"; 14281 case TG3_PHY_ID_BCM5906: return "5906";
14150 case PHY_ID_BCM5761: return "5761"; 14282 case TG3_PHY_ID_BCM5761: return "5761";
14151 case PHY_ID_BCM5717: return "5717"; 14283 case TG3_PHY_ID_BCM5718C: return "5718C";
14152 case PHY_ID_BCM8002: return "8002/serdes"; 14284 case TG3_PHY_ID_BCM5718S: return "5718S";
14285 case TG3_PHY_ID_BCM57765: return "57765";
14286 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14153 case 0: return "serdes"; 14287 case 0: return "serdes";
14154 default: return "unknown"; 14288 default: return "unknown";
14155 } 14289 }
@@ -14291,7 +14425,6 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = {
14291static int __devinit tg3_init_one(struct pci_dev *pdev, 14425static int __devinit tg3_init_one(struct pci_dev *pdev,
14292 const struct pci_device_id *ent) 14426 const struct pci_device_id *ent)
14293{ 14427{
14294 static int tg3_version_printed = 0;
14295 struct net_device *dev; 14428 struct net_device *dev;
14296 struct tg3 *tp; 14429 struct tg3 *tp;
14297 int i, err, pm_cap; 14430 int i, err, pm_cap;
@@ -14299,20 +14432,17 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14299 char str[40]; 14432 char str[40];
14300 u64 dma_mask, persist_dma_mask; 14433 u64 dma_mask, persist_dma_mask;
14301 14434
14302 if (tg3_version_printed++ == 0) 14435 printk_once(KERN_INFO "%s\n", version);
14303 printk(KERN_INFO "%s", version);
14304 14436
14305 err = pci_enable_device(pdev); 14437 err = pci_enable_device(pdev);
14306 if (err) { 14438 if (err) {
14307 printk(KERN_ERR PFX "Cannot enable PCI device, " 14439 pr_err("Cannot enable PCI device, aborting\n");
14308 "aborting.\n");
14309 return err; 14440 return err;
14310 } 14441 }
14311 14442
14312 err = pci_request_regions(pdev, DRV_MODULE_NAME); 14443 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14313 if (err) { 14444 if (err) {
14314 printk(KERN_ERR PFX "Cannot obtain PCI resources, " 14445 pr_err("Cannot obtain PCI resources, aborting\n");
14315 "aborting.\n");
14316 goto err_out_disable_pdev; 14446 goto err_out_disable_pdev;
14317 } 14447 }
14318 14448
@@ -14321,15 +14451,14 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14321 /* Find power-management capability. */ 14451 /* Find power-management capability. */
14322 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 14452 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14323 if (pm_cap == 0) { 14453 if (pm_cap == 0) {
14324 printk(KERN_ERR PFX "Cannot find PowerManagement capability, " 14454 pr_err("Cannot find PowerManagement capability, aborting\n");
14325 "aborting.\n");
14326 err = -EIO; 14455 err = -EIO;
14327 goto err_out_free_res; 14456 goto err_out_free_res;
14328 } 14457 }
14329 14458
14330 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); 14459 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14331 if (!dev) { 14460 if (!dev) {
14332 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); 14461 pr_err("Etherdev alloc failed, aborting\n");
14333 err = -ENOMEM; 14462 err = -ENOMEM;
14334 goto err_out_free_res; 14463 goto err_out_free_res;
14335 } 14464 }
@@ -14379,8 +14508,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14379 14508
14380 tp->regs = pci_ioremap_bar(pdev, BAR_0); 14509 tp->regs = pci_ioremap_bar(pdev, BAR_0);
14381 if (!tp->regs) { 14510 if (!tp->regs) {
14382 printk(KERN_ERR PFX "Cannot map device registers, " 14511 netdev_err(dev, "Cannot map device registers, aborting\n");
14383 "aborting.\n");
14384 err = -ENOMEM; 14512 err = -ENOMEM;
14385 goto err_out_free_dev; 14513 goto err_out_free_dev;
14386 } 14514 }
@@ -14396,8 +14524,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14396 14524
14397 err = tg3_get_invariants(tp); 14525 err = tg3_get_invariants(tp);
14398 if (err) { 14526 if (err) {
14399 printk(KERN_ERR PFX "Problem fetching invariants of chip, " 14527 netdev_err(dev, "Problem fetching invariants of chip, aborting\n");
14400 "aborting.\n");
14401 goto err_out_iounmap; 14528 goto err_out_iounmap;
14402 } 14529 }
14403 14530
@@ -14432,8 +14559,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14432 err = pci_set_consistent_dma_mask(pdev, 14559 err = pci_set_consistent_dma_mask(pdev,
14433 persist_dma_mask); 14560 persist_dma_mask);
14434 if (err < 0) { 14561 if (err < 0) {
14435 printk(KERN_ERR PFX "Unable to obtain 64 bit " 14562 netdev_err(dev, "Unable to obtain 64 bit DMA for consistent allocations\n");
14436 "DMA for consistent allocations\n");
14437 goto err_out_iounmap; 14563 goto err_out_iounmap;
14438 } 14564 }
14439 } 14565 }
@@ -14441,8 +14567,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14441 if (err || dma_mask == DMA_BIT_MASK(32)) { 14567 if (err || dma_mask == DMA_BIT_MASK(32)) {
14442 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 14568 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
14443 if (err) { 14569 if (err) {
14444 printk(KERN_ERR PFX "No usable DMA configuration, " 14570 netdev_err(dev, "No usable DMA configuration, aborting\n");
14445 "aborting.\n");
14446 goto err_out_iounmap; 14571 goto err_out_iounmap;
14447 } 14572 }
14448 } 14573 }
@@ -14491,16 +14616,14 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14491 14616
14492 err = tg3_get_device_address(tp); 14617 err = tg3_get_device_address(tp);
14493 if (err) { 14618 if (err) {
14494 printk(KERN_ERR PFX "Could not obtain valid ethernet address, " 14619 netdev_err(dev, "Could not obtain valid ethernet address, aborting\n");
14495 "aborting.\n");
14496 goto err_out_iounmap; 14620 goto err_out_iounmap;
14497 } 14621 }
14498 14622
14499 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { 14623 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
14500 tp->aperegs = pci_ioremap_bar(pdev, BAR_2); 14624 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
14501 if (!tp->aperegs) { 14625 if (!tp->aperegs) {
14502 printk(KERN_ERR PFX "Cannot map APE registers, " 14626 netdev_err(dev, "Cannot map APE registers, aborting\n");
14503 "aborting.\n");
14504 err = -ENOMEM; 14627 err = -ENOMEM;
14505 goto err_out_iounmap; 14628 goto err_out_iounmap;
14506 } 14629 }
@@ -14524,7 +14647,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14524 14647
14525 err = tg3_test_dma(tp); 14648 err = tg3_test_dma(tp);
14526 if (err) { 14649 if (err) {
14527 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n"); 14650 netdev_err(dev, "DMA engine test failed, aborting\n");
14528 goto err_out_apeunmap; 14651 goto err_out_apeunmap;
14529 } 14652 }
14530 14653
@@ -14585,45 +14708,39 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14585 14708
14586 err = register_netdev(dev); 14709 err = register_netdev(dev);
14587 if (err) { 14710 if (err) {
14588 printk(KERN_ERR PFX "Cannot register net device, " 14711 netdev_err(dev, "Cannot register net device, aborting\n");
14589 "aborting.\n");
14590 goto err_out_apeunmap; 14712 goto err_out_apeunmap;
14591 } 14713 }
14592 14714
14593 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n", 14715 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
14594 dev->name, 14716 tp->board_part_number,
14595 tp->board_part_number, 14717 tp->pci_chip_rev_id,
14596 tp->pci_chip_rev_id, 14718 tg3_bus_string(tp, str),
14597 tg3_bus_string(tp, str), 14719 dev->dev_addr);
14598 dev->dev_addr);
14599 14720
14600 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { 14721 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
14601 struct phy_device *phydev; 14722 struct phy_device *phydev;
14602 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 14723 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
14603 printk(KERN_INFO 14724 netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
14604 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", 14725 phydev->drv->name, dev_name(&phydev->dev));
14605 tp->dev->name, phydev->drv->name,
14606 dev_name(&phydev->dev));
14607 } else 14726 } else
14608 printk(KERN_INFO 14727 netdev_info(dev, "attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
14609 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n", 14728 tg3_phy_string(tp),
14610 tp->dev->name, tg3_phy_string(tp), 14729 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
14611 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" : 14730 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
14612 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" : 14731 "10/100/1000Base-T")),
14613 "10/100/1000Base-T")), 14732 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
14614 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0); 14733
14615 14734 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
14616 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n", 14735 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
14617 dev->name, 14736 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
14618 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0, 14737 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
14619 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0, 14738 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
14620 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0, 14739 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
14621 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0, 14740 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
14622 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0); 14741 tp->dma_rwctrl,
14623 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n", 14742 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
14624 dev->name, tp->dma_rwctrl, 14743 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
14625 (pdev->dma_mask == DMA_BIT_MASK(32)) ? 32 :
14626 (((u64) pdev->dma_mask == DMA_BIT_MASK(40)) ? 40 : 64));
14627 14744
14628 return 0; 14745 return 0;
14629 14746
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 8a167912902b..574a1cc4d353 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -56,7 +56,39 @@
56#define TG3PCI_DEVICE_TIGON3_57765 0x16b4 56#define TG3PCI_DEVICE_TIGON3_57765 0x16b4
57#define TG3PCI_DEVICE_TIGON3_57791 0x16b2 57#define TG3PCI_DEVICE_TIGON3_57791 0x16b2
58#define TG3PCI_DEVICE_TIGON3_57795 0x16b6 58#define TG3PCI_DEVICE_TIGON3_57795 0x16b6
59/* 0x04 --> 0x64 unused */ 59/* 0x04 --> 0x2c unused */
60#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
61#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644
62#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5 0x0001
63#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6 0x0002
64#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9 0x0003
65#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1 0x0005
66#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8 0x0006
67#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7 0x0007
68#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10 0x0008
69#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12 0x8008
70#define TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1 0x0009
71#define TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2 0x8009
72#define TG3PCI_SUBVENDOR_ID_3COM PCI_VENDOR_ID_3COM
73#define TG3PCI_SUBDEVICE_ID_3COM_3C996T 0x1000
74#define TG3PCI_SUBDEVICE_ID_3COM_3C996BT 0x1006
75#define TG3PCI_SUBDEVICE_ID_3COM_3C996SX 0x1004
76#define TG3PCI_SUBDEVICE_ID_3COM_3C1000T 0x1007
77#define TG3PCI_SUBDEVICE_ID_3COM_3C940BR01 0x1008
78#define TG3PCI_SUBVENDOR_ID_DELL PCI_VENDOR_ID_DELL
79#define TG3PCI_SUBDEVICE_ID_DELL_VIPER 0x00d1
80#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106
81#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109
82#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a
83#define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ
84#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c
85#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a
86#define TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING 0x007d
87#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780 0x0085
88#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2 0x0099
89#define TG3PCI_SUBVENDOR_ID_IBM PCI_VENDOR_ID_IBM
90#define TG3PCI_SUBDEVICE_ID_IBM_5703SAX2 0x0281
91/* 0x30 --> 0x64 unused */
60#define TG3PCI_MSI_DATA 0x00000064 92#define TG3PCI_MSI_DATA 0x00000064
61/* 0x66 --> 0x68 unused */ 93/* 0x66 --> 0x68 unused */
62#define TG3PCI_MISC_HOST_CTRL 0x00000068 94#define TG3PCI_MISC_HOST_CTRL 0x00000068
@@ -110,6 +142,7 @@
110#define CHIPREV_ID_57780_A0 0x57780000 142#define CHIPREV_ID_57780_A0 0x57780000
111#define CHIPREV_ID_57780_A1 0x57780001 143#define CHIPREV_ID_57780_A1 0x57780001
112#define CHIPREV_ID_5717_A0 0x05717000 144#define CHIPREV_ID_5717_A0 0x05717000
145#define CHIPREV_ID_57765_A0 0x57785000
113#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) 146#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
114#define ASIC_REV_5700 0x07 147#define ASIC_REV_5700 0x07
115#define ASIC_REV_5701 0x00 148#define ASIC_REV_5701 0x00
@@ -1206,14 +1239,18 @@
1206#define DEFAULT_MB_MACRX_LOW_WATER 0x00000020 1239#define DEFAULT_MB_MACRX_LOW_WATER 0x00000020
1207#define DEFAULT_MB_MACRX_LOW_WATER_5705 0x00000010 1240#define DEFAULT_MB_MACRX_LOW_WATER_5705 0x00000010
1208#define DEFAULT_MB_MACRX_LOW_WATER_5906 0x00000004 1241#define DEFAULT_MB_MACRX_LOW_WATER_5906 0x00000004
1242#define DEFAULT_MB_MACRX_LOW_WATER_57765 0x0000002a
1209#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO 0x00000098 1243#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO 0x00000098
1210#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780 0x0000004b 1244#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780 0x0000004b
1245#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765 0x0000007e
1211#define BUFMGR_MB_HIGH_WATER 0x00004418 1246#define BUFMGR_MB_HIGH_WATER 0x00004418
1212#define DEFAULT_MB_HIGH_WATER 0x00000060 1247#define DEFAULT_MB_HIGH_WATER 0x00000060
1213#define DEFAULT_MB_HIGH_WATER_5705 0x00000060 1248#define DEFAULT_MB_HIGH_WATER_5705 0x00000060
1214#define DEFAULT_MB_HIGH_WATER_5906 0x00000010 1249#define DEFAULT_MB_HIGH_WATER_5906 0x00000010
1250#define DEFAULT_MB_HIGH_WATER_57765 0x000000a0
1215#define DEFAULT_MB_HIGH_WATER_JUMBO 0x0000017c 1251#define DEFAULT_MB_HIGH_WATER_JUMBO 0x0000017c
1216#define DEFAULT_MB_HIGH_WATER_JUMBO_5780 0x00000096 1252#define DEFAULT_MB_HIGH_WATER_JUMBO_5780 0x00000096
1253#define DEFAULT_MB_HIGH_WATER_JUMBO_57765 0x000000ea
1217#define BUFMGR_RX_MB_ALLOC_REQ 0x0000441c 1254#define BUFMGR_RX_MB_ALLOC_REQ 0x0000441c
1218#define BUFMGR_MB_ALLOC_BIT 0x10000000 1255#define BUFMGR_MB_ALLOC_BIT 0x10000000
1219#define BUFMGR_RX_MB_ALLOC_RESP 0x00004420 1256#define BUFMGR_RX_MB_ALLOC_RESP 0x00004420
@@ -1253,6 +1290,7 @@
1253#define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000 1290#define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000
1254#define RDMAC_MODE_FIFO_SIZE_128 0x00020000 1291#define RDMAC_MODE_FIFO_SIZE_128 0x00020000
1255#define RDMAC_MODE_FIFO_LONG_BURST 0x00030000 1292#define RDMAC_MODE_FIFO_LONG_BURST 0x00030000
1293#define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000
1256#define RDMAC_MODE_IPV4_LSO_EN 0x08000000 1294#define RDMAC_MODE_IPV4_LSO_EN 0x08000000
1257#define RDMAC_MODE_IPV6_LSO_EN 0x10000000 1295#define RDMAC_MODE_IPV6_LSO_EN 0x10000000
1258#define RDMAC_STATUS 0x00004804 1296#define RDMAC_STATUS 0x00004804
@@ -1543,6 +1581,8 @@
1543#define GRC_MODE_HOST_SENDBDS 0x00020000 1581#define GRC_MODE_HOST_SENDBDS 0x00020000
1544#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000 1582#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000
1545#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000 1583#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000
1584#define GRC_MODE_PCIE_TL_SEL 0x00000000
1585#define GRC_MODE_PCIE_PL_SEL 0x00400000
1546#define GRC_MODE_NO_RX_PHDR_CSUM 0x00800000 1586#define GRC_MODE_NO_RX_PHDR_CSUM 0x00800000
1547#define GRC_MODE_IRQ_ON_TX_CPU_ATTN 0x01000000 1587#define GRC_MODE_IRQ_ON_TX_CPU_ATTN 0x01000000
1548#define GRC_MODE_IRQ_ON_RX_CPU_ATTN 0x02000000 1588#define GRC_MODE_IRQ_ON_RX_CPU_ATTN 0x02000000
@@ -1550,7 +1590,13 @@
1550#define GRC_MODE_IRQ_ON_DMA_ATTN 0x08000000 1590#define GRC_MODE_IRQ_ON_DMA_ATTN 0x08000000
1551#define GRC_MODE_IRQ_ON_FLOW_ATTN 0x10000000 1591#define GRC_MODE_IRQ_ON_FLOW_ATTN 0x10000000
1552#define GRC_MODE_4X_NIC_SEND_RINGS 0x20000000 1592#define GRC_MODE_4X_NIC_SEND_RINGS 0x20000000
1593#define GRC_MODE_PCIE_DL_SEL 0x20000000
1553#define GRC_MODE_MCAST_FRM_ENABLE 0x40000000 1594#define GRC_MODE_MCAST_FRM_ENABLE 0x40000000
1595#define GRC_MODE_PCIE_HI_1K_EN 0x80000000
1596#define GRC_MODE_PCIE_PORT_MASK (GRC_MODE_PCIE_TL_SEL | \
1597 GRC_MODE_PCIE_PL_SEL | \
1598 GRC_MODE_PCIE_DL_SEL | \
1599 GRC_MODE_PCIE_HI_1K_EN)
1554#define GRC_MISC_CFG 0x00006804 1600#define GRC_MISC_CFG 0x00006804
1555#define GRC_MISC_CFG_CORECLK_RESET 0x00000001 1601#define GRC_MISC_CFG_CORECLK_RESET 0x00000001
1556#define GRC_MISC_CFG_PRESCALAR_MASK 0x000000fe 1602#define GRC_MISC_CFG_PRESCALAR_MASK 0x000000fe
@@ -1804,6 +1850,11 @@
1804/* 0x7e74 --> 0x8000 unused */ 1850/* 0x7e74 --> 0x8000 unused */
1805 1851
1806 1852
1853/* Alternate PCIE definitions */
1854#define TG3_PCIE_TLDLPL_PORT 0x00007c00
1855#define TG3_PCIE_PL_LO_PHYCTL1 0x00000004
1856#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000
1857
1807/* OTP bit definitions */ 1858/* OTP bit definitions */
1808#define TG3_OTP_AGCTGT_MASK 0x000000e0 1859#define TG3_OTP_AGCTGT_MASK 0x000000e0
1809#define TG3_OTP_AGCTGT_SHIFT 1 1860#define TG3_OTP_AGCTGT_SHIFT 1
@@ -1845,6 +1896,8 @@
1845#define TG3_EEPROM_SB_REVISION_0 0x00000000 1896#define TG3_EEPROM_SB_REVISION_0 0x00000000
1846#define TG3_EEPROM_SB_REVISION_2 0x00020000 1897#define TG3_EEPROM_SB_REVISION_2 0x00020000
1847#define TG3_EEPROM_SB_REVISION_3 0x00030000 1898#define TG3_EEPROM_SB_REVISION_3 0x00030000
1899#define TG3_EEPROM_SB_REVISION_4 0x00040000
1900#define TG3_EEPROM_SB_REVISION_5 0x00050000
1848#define TG3_EEPROM_MAGIC_HW 0xabcd 1901#define TG3_EEPROM_MAGIC_HW 0xabcd
1849#define TG3_EEPROM_MAGIC_HW_MSK 0xffff 1902#define TG3_EEPROM_MAGIC_HW_MSK 0xffff
1850 1903
@@ -1862,6 +1915,8 @@
1862#define TG3_EEPROM_SB_F1R2_EDH_OFF 0x14 1915#define TG3_EEPROM_SB_F1R2_EDH_OFF 0x14
1863#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 1916#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10
1864#define TG3_EEPROM_SB_F1R3_EDH_OFF 0x18 1917#define TG3_EEPROM_SB_F1R3_EDH_OFF 0x18
1918#define TG3_EEPROM_SB_F1R4_EDH_OFF 0x1c
1919#define TG3_EEPROM_SB_F1R5_EDH_OFF 0x20
1865#define TG3_EEPROM_SB_EDH_MAJ_MASK 0x00000700 1920#define TG3_EEPROM_SB_EDH_MAJ_MASK 0x00000700
1866#define TG3_EEPROM_SB_EDH_MAJ_SHFT 8 1921#define TG3_EEPROM_SB_EDH_MAJ_SHFT 8
1867#define TG3_EEPROM_SB_EDH_MIN_MASK 0x000000ff 1922#define TG3_EEPROM_SB_EDH_MIN_MASK 0x000000ff
@@ -1956,7 +2011,7 @@
1956 2011
1957#define NIC_SRAM_DATA_CFG_4 0x00000d60 2012#define NIC_SRAM_DATA_CFG_4 0x00000d60
1958#define NIC_SRAM_GMII_MODE 0x00000002 2013#define NIC_SRAM_GMII_MODE 0x00000002
1959#define NIC_SRAM_RGMII_STD_IBND_DISABLE 0x00000004 2014#define NIC_SRAM_RGMII_INBAND_DISABLE 0x00000004
1960#define NIC_SRAM_RGMII_EXT_IBND_RX_EN 0x00000008 2015#define NIC_SRAM_RGMII_EXT_IBND_RX_EN 0x00000008
1961#define NIC_SRAM_RGMII_EXT_IBND_TX_EN 0x00000010 2016#define NIC_SRAM_RGMII_EXT_IBND_TX_EN 0x00000010
1962 2017
@@ -2093,6 +2148,9 @@
2093 2148
2094/* Fast Ethernet Tranceiver definitions */ 2149/* Fast Ethernet Tranceiver definitions */
2095#define MII_TG3_FET_PTEST 0x17 2150#define MII_TG3_FET_PTEST 0x17
2151#define MII_TG3_FET_PTEST_FRC_TX_LINK 0x1000
2152#define MII_TG3_FET_PTEST_FRC_TX_LOCK 0x0800
2153
2096#define MII_TG3_FET_TEST 0x1f 2154#define MII_TG3_FET_TEST 0x1f
2097#define MII_TG3_FET_SHADOW_EN 0x0080 2155#define MII_TG3_FET_SHADOW_EN 0x0080
2098 2156
@@ -2682,6 +2740,7 @@ struct tg3 {
2682 struct net_device *dev; 2740 struct net_device *dev;
2683 struct pci_dev *pdev; 2741 struct pci_dev *pdev;
2684 2742
2743 u32 coal_now;
2685 u32 msg_enable; 2744 u32 msg_enable;
2686 2745
2687 /* begin "tx thread" cacheline section */ 2746 /* begin "tx thread" cacheline section */
@@ -2700,7 +2759,7 @@ struct tg3 {
2700 struct vlan_group *vlgrp; 2759 struct vlan_group *vlgrp;
2701#endif 2760#endif
2702 2761
2703 struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS - 1]; 2762 struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS];
2704 2763
2705 2764
2706 /* begin "everything else" cacheline(s) section */ 2765 /* begin "everything else" cacheline(s) section */
@@ -2798,7 +2857,7 @@ struct tg3 {
2798#define TG3_FLG3_USE_PHYLIB 0x00000010 2857#define TG3_FLG3_USE_PHYLIB 0x00000010
2799#define TG3_FLG3_MDIOBUS_INITED 0x00000020 2858#define TG3_FLG3_MDIOBUS_INITED 0x00000020
2800#define TG3_FLG3_PHY_CONNECTED 0x00000080 2859#define TG3_FLG3_PHY_CONNECTED 0x00000080
2801#define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100 2860#define TG3_FLG3_RGMII_INBAND_DISABLE 0x00000100
2802#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200 2861#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200
2803#define TG3_FLG3_RGMII_EXT_IBND_TX_EN 0x00000400 2862#define TG3_FLG3_RGMII_EXT_IBND_TX_EN 0x00000400
2804#define TG3_FLG3_CLKREQ_BUG 0x00000800 2863#define TG3_FLG3_CLKREQ_BUG 0x00000800
@@ -2812,6 +2871,7 @@ struct tg3 {
2812#define TG3_FLG3_40BIT_DMA_LIMIT_BUG 0x00100000 2871#define TG3_FLG3_40BIT_DMA_LIMIT_BUG 0x00100000
2813#define TG3_FLG3_SHORT_DMA_BUG 0x00200000 2872#define TG3_FLG3_SHORT_DMA_BUG 0x00200000
2814#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000 2873#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000
2874#define TG3_FLG3_L1PLLPD_EN 0x00800000
2815 2875
2816 struct timer_list timer; 2876 struct timer_list timer;
2817 u16 timer_counter; 2877 u16 timer_counter;
@@ -2861,42 +2921,50 @@ struct tg3 {
2861 2921
2862 /* PHY info */ 2922 /* PHY info */
2863 u32 phy_id; 2923 u32 phy_id;
2864#define PHY_ID_MASK 0xfffffff0 2924#define TG3_PHY_ID_MASK 0xfffffff0
2865#define PHY_ID_BCM5400 0x60008040 2925#define TG3_PHY_ID_BCM5400 0x60008040
2866#define PHY_ID_BCM5401 0x60008050 2926#define TG3_PHY_ID_BCM5401 0x60008050
2867#define PHY_ID_BCM5411 0x60008070 2927#define TG3_PHY_ID_BCM5411 0x60008070
2868#define PHY_ID_BCM5701 0x60008110 2928#define TG3_PHY_ID_BCM5701 0x60008110
2869#define PHY_ID_BCM5703 0x60008160 2929#define TG3_PHY_ID_BCM5703 0x60008160
2870#define PHY_ID_BCM5704 0x60008190 2930#define TG3_PHY_ID_BCM5704 0x60008190
2871#define PHY_ID_BCM5705 0x600081a0 2931#define TG3_PHY_ID_BCM5705 0x600081a0
2872#define PHY_ID_BCM5750 0x60008180 2932#define TG3_PHY_ID_BCM5750 0x60008180
2873#define PHY_ID_BCM5752 0x60008100 2933#define TG3_PHY_ID_BCM5752 0x60008100
2874#define PHY_ID_BCM5714 0x60008340 2934#define TG3_PHY_ID_BCM5714 0x60008340
2875#define PHY_ID_BCM5780 0x60008350 2935#define TG3_PHY_ID_BCM5780 0x60008350
2876#define PHY_ID_BCM5755 0xbc050cc0 2936#define TG3_PHY_ID_BCM5755 0xbc050cc0
2877#define PHY_ID_BCM5787 0xbc050ce0 2937#define TG3_PHY_ID_BCM5787 0xbc050ce0
2878#define PHY_ID_BCM5756 0xbc050ed0 2938#define TG3_PHY_ID_BCM5756 0xbc050ed0
2879#define PHY_ID_BCM5784 0xbc050fa0 2939#define TG3_PHY_ID_BCM5784 0xbc050fa0
2880#define PHY_ID_BCM5761 0xbc050fd0 2940#define TG3_PHY_ID_BCM5761 0xbc050fd0
2881#define PHY_ID_BCM5717 0x5c0d8a00 2941#define TG3_PHY_ID_BCM5718C 0x5c0d8a00
2882#define PHY_ID_BCM5906 0xdc00ac40 2942#define TG3_PHY_ID_BCM5718S 0xbc050ff0
2883#define PHY_ID_BCM8002 0x60010140 2943#define TG3_PHY_ID_BCM57765 0x5c0d8a40
2884#define PHY_ID_INVALID 0xffffffff 2944#define TG3_PHY_ID_BCM5906 0xdc00ac40
2885#define PHY_ID_REV_MASK 0x0000000f 2945#define TG3_PHY_ID_BCM8002 0x60010140
2886#define PHY_REV_BCM5401_B0 0x1 2946#define TG3_PHY_ID_INVALID 0xffffffff
2887#define PHY_REV_BCM5401_B2 0x3 2947
2888#define PHY_REV_BCM5401_C0 0x6 2948#define PHY_ID_RTL8211C 0x001cc910
2889#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */ 2949#define PHY_ID_RTL8201E 0x00008200
2890#define TG3_PHY_ID_BCM50610 0x143bd60 2950
2891#define TG3_PHY_ID_BCM50610M 0x143bd70 2951#define TG3_PHY_ID_REV_MASK 0x0000000f
2892#define TG3_PHY_ID_BCMAC131 0x143bc70 2952#define TG3_PHY_REV_BCM5401_B0 0x1
2893#define TG3_PHY_ID_RTL8211C 0x001cc910 2953
2894#define TG3_PHY_ID_RTL8201E 0x00008200 2954 /* This macro assumes the passed PHY ID is
2895#define TG3_PHY_ID_BCM57780 0x03625d90 2955 * already masked with TG3_PHY_ID_MASK.
2896#define TG3_PHY_OUI_MASK 0xfffffc00 2956 */
2897#define TG3_PHY_OUI_1 0x00206000 2957#define TG3_KNOWN_PHY_ID(X) \
2898#define TG3_PHY_OUI_2 0x0143bc00 2958 ((X) == TG3_PHY_ID_BCM5400 || (X) == TG3_PHY_ID_BCM5401 || \
2899#define TG3_PHY_OUI_3 0x03625c00 2959 (X) == TG3_PHY_ID_BCM5411 || (X) == TG3_PHY_ID_BCM5701 || \
2960 (X) == TG3_PHY_ID_BCM5703 || (X) == TG3_PHY_ID_BCM5704 || \
2961 (X) == TG3_PHY_ID_BCM5705 || (X) == TG3_PHY_ID_BCM5750 || \
2962 (X) == TG3_PHY_ID_BCM5752 || (X) == TG3_PHY_ID_BCM5714 || \
2963 (X) == TG3_PHY_ID_BCM5780 || (X) == TG3_PHY_ID_BCM5787 || \
2964 (X) == TG3_PHY_ID_BCM5755 || (X) == TG3_PHY_ID_BCM5756 || \
2965 (X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \
2966 (X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \
2967 (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM8002)
2900 2968
2901 u32 led_ctrl; 2969 u32 led_ctrl;
2902 u32 phy_otp; 2970 u32 phy_otp;
@@ -2909,20 +2977,6 @@ struct tg3 {
2909 u32 pci_clock_ctrl; 2977 u32 pci_clock_ctrl;
2910 struct pci_dev *pdev_peer; 2978 struct pci_dev *pdev_peer;
2911 2979
2912 /* This macro assumes the passed PHY ID is already masked
2913 * with PHY_ID_MASK.
2914 */
2915#define KNOWN_PHY_ID(X) \
2916 ((X) == PHY_ID_BCM5400 || (X) == PHY_ID_BCM5401 || \
2917 (X) == PHY_ID_BCM5411 || (X) == PHY_ID_BCM5701 || \
2918 (X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \
2919 (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \
2920 (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5714 || \
2921 (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \
2922 (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \
2923 (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM5761 || \
2924 (X) == PHY_ID_BCM5717 || (X) == PHY_ID_BCM8002)
2925
2926 struct tg3_hw_stats *hw_stats; 2980 struct tg3_hw_stats *hw_stats;
2927 dma_addr_t stats_mapping; 2981 dma_addr_t stats_mapping;
2928 struct work_struct reset_task; 2982 struct work_struct reset_task;
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index fabaeffb3155..390540c101c7 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -254,7 +254,7 @@ static struct board {
254 { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */ 254 { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
255}; 255};
256 256
257static struct pci_device_id tlan_pci_tbl[] = { 257static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
258 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10, 258 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
259 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 259 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
260 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100, 260 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
@@ -338,7 +338,7 @@ static int TLan_PhyInternalService( struct net_device * );
338static int TLan_PhyDp83840aCheck( struct net_device * ); 338static int TLan_PhyDp83840aCheck( struct net_device * );
339*/ 339*/
340 340
341static int TLan_MiiReadReg( struct net_device *, u16, u16, u16 * ); 341static bool TLan_MiiReadReg( struct net_device *, u16, u16, u16 * );
342static void TLan_MiiSendData( u16, u32, unsigned ); 342static void TLan_MiiSendData( u16, u32, unsigned );
343static void TLan_MiiSync( u16 ); 343static void TLan_MiiSync( u16 );
344static void TLan_MiiWriteReg( struct net_device *, u16, u16, u16 ); 344static void TLan_MiiWriteReg( struct net_device *, u16, u16, u16 );
@@ -1314,7 +1314,7 @@ static struct net_device_stats *TLan_GetStats( struct net_device *dev )
1314 1314
1315static void TLan_SetMulticastList( struct net_device *dev ) 1315static void TLan_SetMulticastList( struct net_device *dev )
1316{ 1316{
1317 struct dev_mc_list *dmi = dev->mc_list; 1317 struct dev_mc_list *dmi;
1318 u32 hash1 = 0; 1318 u32 hash1 = 0;
1319 u32 hash2 = 0; 1319 u32 hash2 = 0;
1320 int i; 1320 int i;
@@ -1335,7 +1335,8 @@ static void TLan_SetMulticastList( struct net_device *dev )
1335 TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF ); 1335 TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF );
1336 TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF ); 1336 TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
1337 } else { 1337 } else {
1338 for ( i = 0; i < dev->mc_count; i++ ) { 1338 i = 0;
1339 netdev_for_each_mc_addr(dmi, dev) {
1339 if ( i < 3 ) { 1340 if ( i < 3 ) {
1340 TLan_SetMac( dev, i + 1, 1341 TLan_SetMac( dev, i + 1,
1341 (char *) &dmi->dmi_addr ); 1342 (char *) &dmi->dmi_addr );
@@ -1346,7 +1347,7 @@ static void TLan_SetMulticastList( struct net_device *dev )
1346 else 1347 else
1347 hash2 |= ( 1 << ( offset - 32 ) ); 1348 hash2 |= ( 1 << ( offset - 32 ) );
1348 } 1349 }
1349 dmi = dmi->next; 1350 i++;
1350 } 1351 }
1351 for ( ; i < 3; i++ ) 1352 for ( ; i < 3; i++ )
1352 TLan_SetMac( dev, i + 1, NULL ); 1353 TLan_SetMac( dev, i + 1, NULL );
@@ -2204,7 +2205,7 @@ TLan_ResetAdapter( struct net_device *dev )
2204 u32 data; 2205 u32 data;
2205 u8 data8; 2206 u8 data8;
2206 2207
2207 priv->tlanFullDuplex = FALSE; 2208 priv->tlanFullDuplex = false;
2208 priv->phyOnline=0; 2209 priv->phyOnline=0;
2209 netif_carrier_off(dev); 2210 netif_carrier_off(dev);
2210 2211
@@ -2259,7 +2260,7 @@ TLan_ResetAdapter( struct net_device *dev )
2259 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a ); 2260 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a );
2260 } else if ( priv->duplex == TLAN_DUPLEX_FULL ) { 2261 } else if ( priv->duplex == TLAN_DUPLEX_FULL ) {
2261 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 ); 2262 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 );
2262 priv->tlanFullDuplex = TRUE; 2263 priv->tlanFullDuplex = true;
2263 } else { 2264 } else {
2264 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 ); 2265 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 );
2265 } 2266 }
@@ -2651,14 +2652,14 @@ static void TLan_PhyStartLink( struct net_device *dev )
2651 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000); 2652 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000);
2652 } else if ( priv->speed == TLAN_SPEED_10 && 2653 } else if ( priv->speed == TLAN_SPEED_10 &&
2653 priv->duplex == TLAN_DUPLEX_FULL) { 2654 priv->duplex == TLAN_DUPLEX_FULL) {
2654 priv->tlanFullDuplex = TRUE; 2655 priv->tlanFullDuplex = true;
2655 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100); 2656 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100);
2656 } else if ( priv->speed == TLAN_SPEED_100 && 2657 } else if ( priv->speed == TLAN_SPEED_100 &&
2657 priv->duplex == TLAN_DUPLEX_HALF) { 2658 priv->duplex == TLAN_DUPLEX_HALF) {
2658 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000); 2659 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000);
2659 } else if ( priv->speed == TLAN_SPEED_100 && 2660 } else if ( priv->speed == TLAN_SPEED_100 &&
2660 priv->duplex == TLAN_DUPLEX_FULL) { 2661 priv->duplex == TLAN_DUPLEX_FULL) {
2661 priv->tlanFullDuplex = TRUE; 2662 priv->tlanFullDuplex = true;
2662 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100); 2663 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100);
2663 } else { 2664 } else {
2664 2665
@@ -2695,7 +2696,7 @@ static void TLan_PhyStartLink( struct net_device *dev )
2695 tctl &= ~TLAN_TC_AUISEL; 2696 tctl &= ~TLAN_TC_AUISEL;
2696 if ( priv->duplex == TLAN_DUPLEX_FULL ) { 2697 if ( priv->duplex == TLAN_DUPLEX_FULL ) {
2697 control |= MII_GC_DUPLEX; 2698 control |= MII_GC_DUPLEX;
2698 priv->tlanFullDuplex = TRUE; 2699 priv->tlanFullDuplex = true;
2699 } 2700 }
2700 if ( priv->speed == TLAN_SPEED_100 ) { 2701 if ( priv->speed == TLAN_SPEED_100 ) {
2701 control |= MII_GC_SPEEDSEL; 2702 control |= MII_GC_SPEEDSEL;
@@ -2750,9 +2751,9 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
2750 TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa ); 2751 TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa );
2751 mode = an_adv & an_lpa & 0x03E0; 2752 mode = an_adv & an_lpa & 0x03E0;
2752 if ( mode & 0x0100 ) { 2753 if ( mode & 0x0100 ) {
2753 priv->tlanFullDuplex = TRUE; 2754 priv->tlanFullDuplex = true;
2754 } else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) { 2755 } else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) {
2755 priv->tlanFullDuplex = TRUE; 2756 priv->tlanFullDuplex = true;
2756 } 2757 }
2757 2758
2758 if ( ( ! ( mode & 0x0180 ) ) && 2759 if ( ( ! ( mode & 0x0180 ) ) &&
@@ -2855,8 +2856,8 @@ void TLan_PhyMonitor( struct net_device *dev )
2855 * TLan_MiiReadReg 2856 * TLan_MiiReadReg
2856 * 2857 *
2857 * Returns: 2858 * Returns:
2858 * 0 if ack received ok 2859 * false if ack received ok
2859 * 1 otherwise. 2860 * true if no ack received or other error
2860 * 2861 *
2861 * Parms: 2862 * Parms:
2862 * dev The device structure containing 2863 * dev The device structure containing
@@ -2875,17 +2876,17 @@ void TLan_PhyMonitor( struct net_device *dev )
2875 * 2876 *
2876 **************************************************************/ 2877 **************************************************************/
2877 2878
2878static int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val ) 2879static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
2879{ 2880{
2880 u8 nack; 2881 u8 nack;
2881 u16 sio, tmp; 2882 u16 sio, tmp;
2882 u32 i; 2883 u32 i;
2883 int err; 2884 bool err;
2884 int minten; 2885 int minten;
2885 TLanPrivateInfo *priv = netdev_priv(dev); 2886 TLanPrivateInfo *priv = netdev_priv(dev);
2886 unsigned long flags = 0; 2887 unsigned long flags = 0;
2887 2888
2888 err = FALSE; 2889 err = false;
2889 outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR); 2890 outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
2890 sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; 2891 sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
2891 2892
@@ -2918,7 +2919,7 @@ static int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
2918 TLan_SetBit(TLAN_NET_SIO_MCLK, sio); 2919 TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
2919 } 2920 }
2920 tmp = 0xffff; 2921 tmp = 0xffff;
2921 err = TRUE; 2922 err = true;
2922 } else { /* ACK, so read data */ 2923 } else { /* ACK, so read data */
2923 for (tmp = 0, i = 0x8000; i; i >>= 1) { 2924 for (tmp = 0, i = 0x8000; i; i >>= 1) {
2924 TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); 2925 TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index 4b82f283e985..d13ff12d7500 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -31,9 +31,6 @@
31 * 31 *
32 ****************************************************************/ 32 ****************************************************************/
33 33
34#define FALSE 0
35#define TRUE 1
36
37#define TLAN_MIN_FRAME_SIZE 64 34#define TLAN_MIN_FRAME_SIZE 64
38#define TLAN_MAX_FRAME_SIZE 1600 35#define TLAN_MAX_FRAME_SIZE 1600
39 36
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index cf552d1d9629..0fb930feea45 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -117,7 +117,7 @@ MODULE_PARM_DESC(message_level, "3c359: Level of reported messages") ;
117 * will be stuck with 1555 lines of hex #'s in the code. 117 * will be stuck with 1555 lines of hex #'s in the code.
118 */ 118 */
119 119
120static struct pci_device_id xl_pci_tbl[] = 120static DEFINE_PCI_DEVICE_TABLE(xl_pci_tbl) =
121{ 121{
122 {PCI_VENDOR_ID_3COM,PCI_DEVICE_ID_3COM_3C359, PCI_ANY_ID, PCI_ANY_ID, }, 122 {PCI_VENDOR_ID_3COM,PCI_DEVICE_ID_3COM_3C359, PCI_ANY_ID, PCI_ANY_ID, },
123 { } /* terminate list */ 123 { } /* terminate list */
@@ -1390,10 +1390,9 @@ static int xl_close(struct net_device *dev)
1390static void xl_set_rx_mode(struct net_device *dev) 1390static void xl_set_rx_mode(struct net_device *dev)
1391{ 1391{
1392 struct xl_private *xl_priv = netdev_priv(dev); 1392 struct xl_private *xl_priv = netdev_priv(dev);
1393 struct dev_mc_list *dmi ; 1393 struct dev_mc_list *dmi;
1394 unsigned char dev_mc_address[4] ; 1394 unsigned char dev_mc_address[4] ;
1395 u16 options ; 1395 u16 options ;
1396 int i ;
1397 1396
1398 if (dev->flags & IFF_PROMISC) 1397 if (dev->flags & IFF_PROMISC)
1399 options = 0x0004 ; 1398 options = 0x0004 ;
@@ -1408,7 +1407,7 @@ static void xl_set_rx_mode(struct net_device *dev)
1408 1407
1409 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; 1408 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
1410 1409
1411 for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) { 1410 netdev_for_each_mc_addr(dmi, dev) {
1412 dev_mc_address[0] |= dmi->dmi_addr[2] ; 1411 dev_mc_address[0] |= dmi->dmi_addr[2] ;
1413 dev_mc_address[1] |= dmi->dmi_addr[3] ; 1412 dev_mc_address[1] |= dmi->dmi_addr[3] ;
1414 dev_mc_address[2] |= dmi->dmi_addr[4] ; 1413 dev_mc_address[2] |= dmi->dmi_addr[4] ;
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
index b9db1b5a58a3..515f122777ab 100644
--- a/drivers/net/tokenring/abyss.c
+++ b/drivers/net/tokenring/abyss.c
@@ -45,7 +45,7 @@ static char version[] __devinitdata =
45 45
46#define ABYSS_IO_EXTENT 64 46#define ABYSS_IO_EXTENT 64
47 47
48static struct pci_device_id abyss_pci_tbl[] = { 48static DEFINE_PCI_DEVICE_TABLE(abyss_pci_tbl) = {
49 { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_MK2, 49 { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_MK2,
50 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_TOKEN_RING << 8, 0x00ffffff, }, 50 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_TOKEN_RING << 8, 0x00ffffff, },
51 { } /* Terminating entry */ 51 { } /* Terminating entry */
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 66272f2a0758..1a0967246e2f 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -995,13 +995,11 @@ static void tok_set_multicast_list(struct net_device *dev)
995 /*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/ 995 /*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/
996 if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return; 996 if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return;
997 address[0] = address[1] = address[2] = address[3] = 0; 997 address[0] = address[1] = address[2] = address[3] = 0;
998 mclist = dev->mc_list; 998 netdev_for_each_mc_addr(mclist, dev) {
999 for (i = 0; i < dev->mc_count; i++) {
1000 address[0] |= mclist->dmi_addr[2]; 999 address[0] |= mclist->dmi_addr[2];
1001 address[1] |= mclist->dmi_addr[3]; 1000 address[1] |= mclist->dmi_addr[3];
1002 address[2] |= mclist->dmi_addr[4]; 1001 address[2] |= mclist->dmi_addr[4];
1003 address[3] |= mclist->dmi_addr[5]; 1002 address[3] |= mclist->dmi_addr[5];
1004 mclist = mclist->next;
1005 } 1003 }
1006 SET_PAGE(ti->srb_page); 1004 SET_PAGE(ti->srb_page);
1007 for (i = 0; i < sizeof(struct srb_set_funct_addr); i++) 1005 for (i = 0; i < sizeof(struct srb_set_funct_addr); i++)
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index d6ccd59c7d07..dd028fee9dc2 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -146,7 +146,7 @@
146static char version[] = "LanStreamer.c v0.4.0 03/08/01 - Mike Sullivan\n" 146static char version[] = "LanStreamer.c v0.4.0 03/08/01 - Mike Sullivan\n"
147 " v0.5.3 11/13/02 - Kent Yoder"; 147 " v0.5.3 11/13/02 - Kent Yoder";
148 148
149static struct pci_device_id streamer_pci_tbl[] = { 149static DEFINE_PCI_DEVICE_TABLE(streamer_pci_tbl) = {
150 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR, PCI_ANY_ID, PCI_ANY_ID,}, 150 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR, PCI_ANY_ID, PCI_ANY_ID,},
151 {} /* terminating entry */ 151 {} /* terminating entry */
152}; 152};
@@ -1268,7 +1268,6 @@ static void streamer_set_rx_mode(struct net_device *dev)
1268 __u8 options = 0; 1268 __u8 options = 0;
1269 struct dev_mc_list *dmi; 1269 struct dev_mc_list *dmi;
1270 unsigned char dev_mc_address[5]; 1270 unsigned char dev_mc_address[5];
1271 int i;
1272 1271
1273 writel(streamer_priv->srb, streamer_mmio + LAPA); 1272 writel(streamer_priv->srb, streamer_mmio + LAPA);
1274 options = streamer_priv->streamer_copy_all_options; 1273 options = streamer_priv->streamer_copy_all_options;
@@ -1303,8 +1302,7 @@ static void streamer_set_rx_mode(struct net_device *dev)
1303 writel(streamer_priv->srb,streamer_mmio+LAPA); 1302 writel(streamer_priv->srb,streamer_mmio+LAPA);
1304 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; 1303 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
1305 1304
1306 for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) 1305 netdev_for_each_mc_addr(dmi, dev) {
1307 {
1308 dev_mc_address[0] |= dmi->dmi_addr[2] ; 1306 dev_mc_address[0] |= dmi->dmi_addr[2] ;
1309 dev_mc_address[1] |= dmi->dmi_addr[3] ; 1307 dev_mc_address[1] |= dmi->dmi_addr[3] ;
1310 dev_mc_address[2] |= dmi->dmi_addr[4] ; 1308 dev_mc_address[2] |= dmi->dmi_addr[4] ;
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index df32025c5132..3a25e0434ae2 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -172,7 +172,7 @@ module_param_array(message_level, int, NULL, 0) ;
172static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,}; 172static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
173module_param_array(network_monitor, int, NULL, 0); 173module_param_array(network_monitor, int, NULL, 0);
174 174
175static struct pci_device_id olympic_pci_tbl[] = { 175static DEFINE_PCI_DEVICE_TABLE(olympic_pci_tbl) = {
176 {PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,}, 176 {PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
177 { } /* Terminating Entry */ 177 { } /* Terminating Entry */
178}; 178};
@@ -1139,9 +1139,8 @@ static void olympic_set_rx_mode(struct net_device *dev)
1139 u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ; 1139 u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
1140 u8 options = 0; 1140 u8 options = 0;
1141 u8 __iomem *srb; 1141 u8 __iomem *srb;
1142 struct dev_mc_list *dmi ; 1142 struct dev_mc_list *dmi;
1143 unsigned char dev_mc_address[4] ; 1143 unsigned char dev_mc_address[4] ;
1144 int i ;
1145 1144
1146 writel(olympic_priv->srb,olympic_mmio+LAPA); 1145 writel(olympic_priv->srb,olympic_mmio+LAPA);
1147 srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800)); 1146 srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
@@ -1178,7 +1177,7 @@ static void olympic_set_rx_mode(struct net_device *dev)
1178 1177
1179 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; 1178 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
1180 1179
1181 for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) { 1180 netdev_for_each_mc_addr(dmi, dev) {
1182 dev_mc_address[0] |= dmi->dmi_addr[2] ; 1181 dev_mc_address[0] |= dmi->dmi_addr[2] ;
1183 dev_mc_address[1] |= dmi->dmi_addr[3] ; 1182 dev_mc_address[1] |= dmi->dmi_addr[3] ;
1184 dev_mc_address[2] |= dmi->dmi_addr[4] ; 1183 dev_mc_address[2] |= dmi->dmi_addr[4] ;
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index e3c42f5ac4a9..21a01753312a 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -1212,10 +1212,9 @@ static void tms380tr_set_multicast_list(struct net_device *dev)
1212 } 1212 }
1213 else 1213 else
1214 { 1214 {
1215 int i; 1215 struct dev_mc_list *mclist;
1216 struct dev_mc_list *mclist = dev->mc_list; 1216
1217 for (i=0; i< dev->mc_count; i++) 1217 netdev_for_each_mc_addr(mclist, dev) {
1218 {
1219 ((char *)(&tp->ocpl.FunctAddr))[0] |= 1218 ((char *)(&tp->ocpl.FunctAddr))[0] |=
1220 mclist->dmi_addr[2]; 1219 mclist->dmi_addr[2];
1221 ((char *)(&tp->ocpl.FunctAddr))[1] |= 1220 ((char *)(&tp->ocpl.FunctAddr))[1] |=
@@ -1224,7 +1223,6 @@ static void tms380tr_set_multicast_list(struct net_device *dev)
1224 mclist->dmi_addr[4]; 1223 mclist->dmi_addr[4];
1225 ((char *)(&tp->ocpl.FunctAddr))[3] |= 1224 ((char *)(&tp->ocpl.FunctAddr))[3] |=
1226 mclist->dmi_addr[5]; 1225 mclist->dmi_addr[5];
1227 mclist = mclist->next;
1228 } 1226 }
1229 } 1227 }
1230 tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR); 1228 tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR);
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index f92fe86fdcae..d4c7c0c0a3d6 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -57,7 +57,7 @@ static struct card_info card_info_table[] = {
57 { {0x03, 0x01}, "3Com Token Link Velocity"}, 57 { {0x03, 0x01}, "3Com Token Link Velocity"},
58}; 58};
59 59
60static struct pci_device_id tmspci_pci_tbl[] = { 60static DEFINE_PCI_DEVICE_TABLE(tmspci_pci_tbl) = {
61 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 61 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
62 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_TR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, 62 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_TR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
63 { PCI_VENDOR_ID_TCONRAD, PCI_DEVICE_ID_TCONRAD_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, 63 { PCI_VENDOR_ID_TCONRAD, PCI_DEVICE_ID_TCONRAD_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index a69c4a48bab9..647cdd1d4e20 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -1184,29 +1184,19 @@ static void tsi108_set_rx_mode(struct net_device *dev)
1184 1184
1185 rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE); 1185 rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE);
1186 1186
1187 if (dev->flags & IFF_ALLMULTI || dev->mc_count) { 1187 if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
1188 int i; 1188 int i;
1189 struct dev_mc_list *mc = dev->mc_list; 1189 struct dev_mc_list *mc;
1190 rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH; 1190 rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
1191 1191
1192 memset(data->mc_hash, 0, sizeof(data->mc_hash)); 1192 memset(data->mc_hash, 0, sizeof(data->mc_hash));
1193 1193
1194 while (mc) { 1194 netdev_for_each_mc_addr(mc, dev) {
1195 u32 hash, crc; 1195 u32 hash, crc;
1196 1196
1197 if (mc->dmi_addrlen == 6) { 1197 crc = ether_crc(6, mc->dmi_addr);
1198 crc = ether_crc(6, mc->dmi_addr); 1198 hash = crc >> 23;
1199 hash = crc >> 23; 1199 __set_bit(hash, &data->mc_hash[0]);
1200
1201 __set_bit(hash, &data->mc_hash[0]);
1202 } else {
1203 printk(KERN_ERR
1204 "%s: got multicast address of length %d instead of 6.\n",
1205 dev->name,
1206 mc->dmi_addrlen);
1207 }
1208
1209 mc = mc->next;
1210 } 1200 }
1211 1201
1212 TSI_WRITE(TSI108_EC_HASHADDR, 1202 TSI_WRITE(TSI108_EC_HASHADDR,
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c
index 9f6742fad6ca..007d8e75666d 100644
--- a/drivers/net/tulip/21142.c
+++ b/drivers/net/tulip/21142.c
@@ -43,8 +43,8 @@ void t21142_media_task(struct work_struct *work)
43 if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000) 43 if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
44 csr12 |= 6; 44 csr12 |= 6;
45 if (tulip_debug > 2) 45 if (tulip_debug > 2)
46 printk(KERN_INFO"%s: 21143 negotiation status %8.8x, %s.\n", 46 dev_info(&dev->dev, "21143 negotiation status %08x, %s\n",
47 dev->name, csr12, medianame[dev->if_port]); 47 csr12, medianame[dev->if_port]);
48 if (tulip_media_cap[dev->if_port] & MediaIsMII) { 48 if (tulip_media_cap[dev->if_port] & MediaIsMII) {
49 if (tulip_check_duplex(dev) < 0) { 49 if (tulip_check_duplex(dev) < 0) {
50 netif_carrier_off(dev); 50 netif_carrier_off(dev);
@@ -56,23 +56,26 @@ void t21142_media_task(struct work_struct *work)
56 } else if (tp->nwayset) { 56 } else if (tp->nwayset) {
57 /* Don't screw up a negotiated session! */ 57 /* Don't screw up a negotiated session! */
58 if (tulip_debug > 1) 58 if (tulip_debug > 1)
59 printk(KERN_INFO"%s: Using NWay-set %s media, csr12 %8.8x.\n", 59 dev_info(&dev->dev,
60 dev->name, medianame[dev->if_port], csr12); 60 "Using NWay-set %s media, csr12 %08x\n",
61 medianame[dev->if_port], csr12);
61 } else if (tp->medialock) { 62 } else if (tp->medialock) {
62 ; 63 ;
63 } else if (dev->if_port == 3) { 64 } else if (dev->if_port == 3) {
64 if (csr12 & 2) { /* No 100mbps link beat, revert to 10mbps. */ 65 if (csr12 & 2) { /* No 100mbps link beat, revert to 10mbps. */
65 if (tulip_debug > 1) 66 if (tulip_debug > 1)
66 printk(KERN_INFO"%s: No 21143 100baseTx link beat, %8.8x, " 67 dev_info(&dev->dev,
67 "trying NWay.\n", dev->name, csr12); 68 "No 21143 100baseTx link beat, %08x, trying NWay\n",
69 csr12);
68 t21142_start_nway(dev); 70 t21142_start_nway(dev);
69 next_tick = 3*HZ; 71 next_tick = 3*HZ;
70 } 72 }
71 } else if ((csr12 & 0x7000) != 0x5000) { 73 } else if ((csr12 & 0x7000) != 0x5000) {
72 /* Negotiation failed. Search media types. */ 74 /* Negotiation failed. Search media types. */
73 if (tulip_debug > 1) 75 if (tulip_debug > 1)
74 printk(KERN_INFO"%s: 21143 negotiation failed, status %8.8x.\n", 76 dev_info(&dev->dev,
75 dev->name, csr12); 77 "21143 negotiation failed, status %08x\n",
78 csr12);
76 if (!(csr12 & 4)) { /* 10mbps link beat good. */ 79 if (!(csr12 & 4)) { /* 10mbps link beat good. */
77 new_csr6 = 0x82420000; 80 new_csr6 = 0x82420000;
78 dev->if_port = 0; 81 dev->if_port = 0;
@@ -90,8 +93,8 @@ void t21142_media_task(struct work_struct *work)
90 iowrite32(1, ioaddr + CSR13); 93 iowrite32(1, ioaddr + CSR13);
91 } 94 }
92 if (tulip_debug > 1) 95 if (tulip_debug > 1)
93 printk(KERN_INFO"%s: Testing new 21143 media %s.\n", 96 dev_info(&dev->dev, "Testing new 21143 media %s\n",
94 dev->name, medianame[dev->if_port]); 97 medianame[dev->if_port]);
95 if (new_csr6 != (tp->csr6 & ~0x00D5)) { 98 if (new_csr6 != (tp->csr6 & ~0x00D5)) {
96 tp->csr6 &= 0x00D5; 99 tp->csr6 &= 0x00D5;
97 tp->csr6 |= new_csr6; 100 tp->csr6 |= new_csr6;
@@ -119,8 +122,8 @@ void t21142_start_nway(struct net_device *dev)
119 tp->nway = tp->mediasense = 1; 122 tp->nway = tp->mediasense = 1;
120 tp->nwayset = tp->lpar = 0; 123 tp->nwayset = tp->lpar = 0;
121 if (tulip_debug > 1) 124 if (tulip_debug > 1)
122 printk(KERN_DEBUG "%s: Restarting 21143 autonegotiation, csr14=%8.8x.\n", 125 printk(KERN_DEBUG "%s: Restarting 21143 autonegotiation, csr14=%08x\n",
123 dev->name, csr14); 126 dev->name, csr14);
124 iowrite32(0x0001, ioaddr + CSR13); 127 iowrite32(0x0001, ioaddr + CSR13);
125 udelay(100); 128 udelay(100);
126 iowrite32(csr14, ioaddr + CSR14); 129 iowrite32(csr14, ioaddr + CSR14);
@@ -147,8 +150,9 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
147 if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000) 150 if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
148 csr12 |= 6; 151 csr12 |= 6;
149 if (tulip_debug > 1) 152 if (tulip_debug > 1)
150 printk(KERN_INFO"%s: 21143 link status interrupt %8.8x, CSR5 %x, " 153 dev_info(&dev->dev,
151 "%8.8x.\n", dev->name, csr12, csr5, csr14); 154 "21143 link status interrupt %08x, CSR5 %x, %08x\n",
155 csr12, csr5, csr14);
152 156
153 /* If NWay finished and we have a negotiated partner capability. */ 157 /* If NWay finished and we have a negotiated partner capability. */
154 if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) { 158 if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) {
@@ -171,14 +175,15 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
171 175
172 if (tulip_debug > 1) { 176 if (tulip_debug > 1) {
173 if (tp->nwayset) 177 if (tp->nwayset)
174 printk(KERN_INFO "%s: Switching to %s based on link " 178 dev_info(&dev->dev,
175 "negotiation %4.4x & %4.4x = %4.4x.\n", 179 "Switching to %s based on link negotiation %04x & %04x = %04x\n",
176 dev->name, medianame[dev->if_port], tp->sym_advertise, 180 medianame[dev->if_port],
177 tp->lpar, negotiated); 181 tp->sym_advertise, tp->lpar,
182 negotiated);
178 else 183 else
179 printk(KERN_INFO "%s: Autonegotiation failed, using %s," 184 dev_info(&dev->dev,
180 " link beat status %4.4x.\n", 185 "Autonegotiation failed, using %s, link beat status %04x\n",
181 dev->name, medianame[dev->if_port], csr12); 186 medianame[dev->if_port], csr12);
182 } 187 }
183 188
184 if (tp->mtable) { 189 if (tp->mtable) {
@@ -201,14 +206,14 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
201#if 0 /* Restart shouldn't be needed. */ 206#if 0 /* Restart shouldn't be needed. */
202 iowrite32(tp->csr6 | RxOn, ioaddr + CSR6); 207 iowrite32(tp->csr6 | RxOn, ioaddr + CSR6);
203 if (tulip_debug > 2) 208 if (tulip_debug > 2)
204 printk(KERN_DEBUG "%s: Restarting Tx and Rx, CSR5 is %8.8x.\n", 209 printk(KERN_DEBUG "%s: Restarting Tx and Rx, CSR5 is %08x\n",
205 dev->name, ioread32(ioaddr + CSR5)); 210 dev->name, ioread32(ioaddr + CSR5));
206#endif 211#endif
207 tulip_start_rxtx(tp); 212 tulip_start_rxtx(tp);
208 if (tulip_debug > 2) 213 if (tulip_debug > 2)
209 printk(KERN_DEBUG "%s: Setting CSR6 %8.8x/%x CSR12 %8.8x.\n", 214 printk(KERN_DEBUG "%s: Setting CSR6 %08x/%x CSR12 %08x\n",
210 dev->name, tp->csr6, ioread32(ioaddr + CSR6), 215 dev->name, tp->csr6, ioread32(ioaddr + CSR6),
211 ioread32(ioaddr + CSR12)); 216 ioread32(ioaddr + CSR12));
212 } else if ((tp->nwayset && (csr5 & 0x08000000) && 217 } else if ((tp->nwayset && (csr5 & 0x08000000) &&
213 (dev->if_port == 3 || dev->if_port == 5) && 218 (dev->if_port == 3 || dev->if_port == 5) &&
214 (csr12 & 2) == 2) || 219 (csr12 & 2) == 2) ||
@@ -220,9 +225,9 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
220 add_timer(&tp->timer); 225 add_timer(&tp->timer);
221 } else if (dev->if_port == 3 || dev->if_port == 5) { 226 } else if (dev->if_port == 3 || dev->if_port == 5) {
222 if (tulip_debug > 1) 227 if (tulip_debug > 1)
223 printk(KERN_INFO"%s: 21143 %s link beat %s.\n", 228 dev_info(&dev->dev, "21143 %s link beat %s\n",
224 dev->name, medianame[dev->if_port], 229 medianame[dev->if_port],
225 (csr12 & 2) ? "failed" : "good"); 230 (csr12 & 2) ? "failed" : "good");
226 if ((csr12 & 2) && ! tp->medialock) { 231 if ((csr12 & 2) && ! tp->medialock) {
227 del_timer_sync(&tp->timer); 232 del_timer_sync(&tp->timer);
228 t21142_start_nway(dev); 233 t21142_start_nway(dev);
@@ -232,21 +237,18 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
232 iowrite32(csr14 & ~0x080, ioaddr + CSR14); 237 iowrite32(csr14 & ~0x080, ioaddr + CSR14);
233 } else if (dev->if_port == 0 || dev->if_port == 4) { 238 } else if (dev->if_port == 0 || dev->if_port == 4) {
234 if ((csr12 & 4) == 0) 239 if ((csr12 & 4) == 0)
235 printk(KERN_INFO"%s: 21143 10baseT link beat good.\n", 240 dev_info(&dev->dev, "21143 10baseT link beat good\n");
236 dev->name);
237 } else if (!(csr12 & 4)) { /* 10mbps link beat good. */ 241 } else if (!(csr12 & 4)) { /* 10mbps link beat good. */
238 if (tulip_debug) 242 if (tulip_debug)
239 printk(KERN_INFO"%s: 21143 10mbps sensed media.\n", 243 dev_info(&dev->dev, "21143 10mbps sensed media\n");
240 dev->name);
241 dev->if_port = 0; 244 dev->if_port = 0;
242 } else if (tp->nwayset) { 245 } else if (tp->nwayset) {
243 if (tulip_debug) 246 if (tulip_debug)
244 printk(KERN_INFO"%s: 21143 using NWay-set %s, csr6 %8.8x.\n", 247 dev_info(&dev->dev, "21143 using NWay-set %s, csr6 %08x\n",
245 dev->name, medianame[dev->if_port], tp->csr6); 248 medianame[dev->if_port], tp->csr6);
246 } else { /* 100mbps link beat good. */ 249 } else { /* 100mbps link beat good. */
247 if (tulip_debug) 250 if (tulip_debug)
248 printk(KERN_INFO"%s: 21143 100baseTx sensed media.\n", 251 dev_info(&dev->dev, "21143 100baseTx sensed media\n");
249 dev->name);
250 dev->if_port = 3; 252 dev->if_port = 3;
251 tp->csr6 = 0x838E0000 | (tp->csr6 & 0x20ff); 253 tp->csr6 = 0x838E0000 | (tp->csr6 & 0x20ff);
252 iowrite32(0x0003FF7F, ioaddr + CSR14); 254 iowrite32(0x0003FF7F, ioaddr + CSR14);
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index d4255d44cb75..cb429723b2c8 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -337,7 +337,7 @@ static void de21041_media_timer (unsigned long data);
337static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media); 337static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
338 338
339 339
340static struct pci_device_id de_pci_tbl[] = { 340static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = {
341 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP, 341 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
342 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 342 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
343 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS, 343 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
@@ -382,9 +382,9 @@ static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
382 /* Ingore earlier buffers. */ 382 /* Ingore earlier buffers. */
383 if ((status & 0xffff) != 0x7fff) { 383 if ((status & 0xffff) != 0x7fff) {
384 if (netif_msg_rx_err(de)) 384 if (netif_msg_rx_err(de))
385 printk(KERN_WARNING "%s: Oversized Ethernet frame " 385 dev_warn(&de->dev->dev,
386 "spanned multiple buffers, status %8.8x!\n", 386 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
387 de->dev->name, status); 387 status);
388 de->net_stats.rx_length_errors++; 388 de->net_stats.rx_length_errors++;
389 } 389 }
390 } else if (status & RxError) { 390 } else if (status & RxError) {
@@ -487,7 +487,7 @@ rx_next:
487 } 487 }
488 488
489 if (!rx_work) 489 if (!rx_work)
490 printk(KERN_WARNING "%s: rx work limit reached\n", de->dev->name); 490 dev_warn(&de->dev->dev, "rx work limit reached\n");
491 491
492 de->rx_tail = rx_tail; 492 de->rx_tail = rx_tail;
493} 493}
@@ -504,7 +504,8 @@ static irqreturn_t de_interrupt (int irq, void *dev_instance)
504 504
505 if (netif_msg_intr(de)) 505 if (netif_msg_intr(de))
506 printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n", 506 printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
507 dev->name, status, dr32(MacMode), de->rx_tail, de->tx_head, de->tx_tail); 507 dev->name, status, dr32(MacMode),
508 de->rx_tail, de->tx_head, de->tx_tail);
508 509
509 dw32(MacStatus, status); 510 dw32(MacStatus, status);
510 511
@@ -529,8 +530,9 @@ static irqreturn_t de_interrupt (int irq, void *dev_instance)
529 530
530 pci_read_config_word(de->pdev, PCI_STATUS, &pci_status); 531 pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
531 pci_write_config_word(de->pdev, PCI_STATUS, pci_status); 532 pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
532 printk(KERN_ERR "%s: PCI bus error, status=%08x, PCI status=%04x\n", 533 dev_err(&de->dev->dev,
533 dev->name, status, pci_status); 534 "PCI bus error, status=%08x, PCI status=%04x\n",
535 status, pci_status);
534 } 536 }
535 537
536 return IRQ_HANDLED; 538 return IRQ_HANDLED;
@@ -582,7 +584,8 @@ static void de_tx (struct de_private *de)
582 de->net_stats.tx_packets++; 584 de->net_stats.tx_packets++;
583 de->net_stats.tx_bytes += skb->len; 585 de->net_stats.tx_bytes += skb->len;
584 if (netif_msg_tx_done(de)) 586 if (netif_msg_tx_done(de))
585 printk(KERN_DEBUG "%s: tx done, slot %d\n", de->dev->name, tx_tail); 587 printk(KERN_DEBUG "%s: tx done, slot %d\n",
588 de->dev->name, tx_tail);
586 } 589 }
587 dev_kfree_skb_irq(skb); 590 dev_kfree_skb_irq(skb);
588 } 591 }
@@ -674,18 +677,17 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
674 memset(hash_table, 0, sizeof(hash_table)); 677 memset(hash_table, 0, sizeof(hash_table));
675 set_bit_le(255, hash_table); /* Broadcast entry */ 678 set_bit_le(255, hash_table); /* Broadcast entry */
676 /* This should work on big-endian machines as well. */ 679 /* This should work on big-endian machines as well. */
677 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 680 netdev_for_each_mc_addr(mclist, dev) {
678 i++, mclist = mclist->next) {
679 int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff; 681 int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
680 682
681 set_bit_le(index, hash_table); 683 set_bit_le(index, hash_table);
684 }
682 685
683 for (i = 0; i < 32; i++) { 686 for (i = 0; i < 32; i++) {
684 *setup_frm++ = hash_table[i]; 687 *setup_frm++ = hash_table[i];
685 *setup_frm++ = hash_table[i]; 688 *setup_frm++ = hash_table[i];
686 }
687 setup_frm = &de->setup_frame[13*6];
688 } 689 }
690 setup_frm = &de->setup_frame[13*6];
689 691
690 /* Fill the final entry with our physical address. */ 692 /* Fill the final entry with our physical address. */
691 eaddrs = (u16 *)dev->dev_addr; 693 eaddrs = (u16 *)dev->dev_addr;
@@ -698,20 +700,18 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
698{ 700{
699 struct de_private *de = netdev_priv(dev); 701 struct de_private *de = netdev_priv(dev);
700 struct dev_mc_list *mclist; 702 struct dev_mc_list *mclist;
701 int i;
702 u16 *eaddrs; 703 u16 *eaddrs;
703 704
704 /* We have <= 14 addresses so we can use the wonderful 705 /* We have <= 14 addresses so we can use the wonderful
705 16 address perfect filtering of the Tulip. */ 706 16 address perfect filtering of the Tulip. */
706 for (i = 0, mclist = dev->mc_list; i < dev->mc_count; 707 netdev_for_each_mc_addr(mclist, dev) {
707 i++, mclist = mclist->next) {
708 eaddrs = (u16 *)mclist->dmi_addr; 708 eaddrs = (u16 *)mclist->dmi_addr;
709 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; 709 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
710 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; 710 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
711 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; 711 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
712 } 712 }
713 /* Fill the unused entries with the broadcast address. */ 713 /* Fill the unused entries with the broadcast address. */
714 memset(setup_frm, 0xff, (15-i)*12); 714 memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
715 setup_frm = &de->setup_frame[15*6]; 715 setup_frm = &de->setup_frame[15*6];
716 716
717 /* Fill the final entry with our physical address. */ 717 /* Fill the final entry with our physical address. */
@@ -738,7 +738,7 @@ static void __de_set_rx_mode (struct net_device *dev)
738 goto out; 738 goto out;
739 } 739 }
740 740
741 if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) { 741 if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
742 /* Too many to filter well -- accept all multicasts. */ 742 /* Too many to filter well -- accept all multicasts. */
743 macmode |= AcceptAllMulticast; 743 macmode |= AcceptAllMulticast;
744 goto out; 744 goto out;
@@ -746,7 +746,7 @@ static void __de_set_rx_mode (struct net_device *dev)
746 746
747 /* Note that only the low-address shortword of setup_frame is valid! 747 /* Note that only the low-address shortword of setup_frame is valid!
748 The values are doubled for big-endian architectures. */ 748 The values are doubled for big-endian architectures. */
749 if (dev->mc_count > 14) /* Must use a multicast hash table. */ 749 if (netdev_mc_count(dev) > 14) /* Must use a multicast hash table. */
750 build_setup_frame_hash (de->setup_frame, dev); 750 build_setup_frame_hash (de->setup_frame, dev);
751 else 751 else
752 build_setup_frame_perfect (de->setup_frame, dev); 752 build_setup_frame_perfect (de->setup_frame, dev);
@@ -870,7 +870,7 @@ static void de_stop_rxtx (struct de_private *de)
870 udelay(100); 870 udelay(100);
871 } 871 }
872 872
873 printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name); 873 dev_warn(&de->dev->dev, "timeout expired stopping DMA\n");
874} 874}
875 875
876static inline void de_start_rxtx (struct de_private *de) 876static inline void de_start_rxtx (struct de_private *de)
@@ -905,8 +905,8 @@ static void de_link_up(struct de_private *de)
905 if (!netif_carrier_ok(de->dev)) { 905 if (!netif_carrier_ok(de->dev)) {
906 netif_carrier_on(de->dev); 906 netif_carrier_on(de->dev);
907 if (netif_msg_link(de)) 907 if (netif_msg_link(de))
908 printk(KERN_INFO "%s: link up, media %s\n", 908 dev_info(&de->dev->dev, "link up, media %s\n",
909 de->dev->name, media_name[de->media_type]); 909 media_name[de->media_type]);
910 } 910 }
911} 911}
912 912
@@ -915,7 +915,7 @@ static void de_link_down(struct de_private *de)
915 if (netif_carrier_ok(de->dev)) { 915 if (netif_carrier_ok(de->dev)) {
916 netif_carrier_off(de->dev); 916 netif_carrier_off(de->dev);
917 if (netif_msg_link(de)) 917 if (netif_msg_link(de))
918 printk(KERN_INFO "%s: link down\n", de->dev->name); 918 dev_info(&de->dev->dev, "link down\n");
919 } 919 }
920} 920}
921 921
@@ -925,7 +925,8 @@ static void de_set_media (struct de_private *de)
925 u32 macmode = dr32(MacMode); 925 u32 macmode = dr32(MacMode);
926 926
927 if (de_is_running(de)) 927 if (de_is_running(de))
928 printk(KERN_WARNING "%s: chip is running while changing media!\n", de->dev->name); 928 dev_warn(&de->dev->dev,
929 "chip is running while changing media!\n");
929 930
930 if (de->de21040) 931 if (de->de21040)
931 dw32(CSR11, FULL_DUPLEX_MAGIC); 932 dw32(CSR11, FULL_DUPLEX_MAGIC);
@@ -945,15 +946,15 @@ static void de_set_media (struct de_private *de)
945 macmode &= ~FullDuplex; 946 macmode &= ~FullDuplex;
946 947
947 if (netif_msg_link(de)) { 948 if (netif_msg_link(de)) {
948 printk(KERN_INFO 949 dev_info(&de->dev->dev, "set link %s\n", media_name[media]);
949 "%s: set link %s\n" 950 dev_info(&de->dev->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
950 "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n" 951 dr32(MacMode), dr32(SIAStatus),
951 "%s: set mode 0x%x, set sia 0x%x,0x%x,0x%x\n", 952 dr32(CSR13), dr32(CSR14), dr32(CSR15));
952 de->dev->name, media_name[media], 953
953 de->dev->name, dr32(MacMode), dr32(SIAStatus), 954 dev_info(&de->dev->dev,
954 dr32(CSR13), dr32(CSR14), dr32(CSR15), 955 "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
955 de->dev->name, macmode, de->media[media].csr13, 956 macmode, de->media[media].csr13,
956 de->media[media].csr14, de->media[media].csr15); 957 de->media[media].csr14, de->media[media].csr15);
957 } 958 }
958 if (macmode != dr32(MacMode)) 959 if (macmode != dr32(MacMode))
959 dw32(MacMode, macmode); 960 dw32(MacMode, macmode);
@@ -992,9 +993,8 @@ static void de21040_media_timer (unsigned long data)
992 de_link_up(de); 993 de_link_up(de);
993 else 994 else
994 if (netif_msg_timer(de)) 995 if (netif_msg_timer(de))
995 printk(KERN_INFO "%s: %s link ok, status %x\n", 996 dev_info(&dev->dev, "%s link ok, status %x\n",
996 dev->name, media_name[de->media_type], 997 media_name[de->media_type], status);
997 status);
998 return; 998 return;
999 } 999 }
1000 1000
@@ -1022,8 +1022,8 @@ no_link_yet:
1022 add_timer(&de->media_timer); 1022 add_timer(&de->media_timer);
1023 1023
1024 if (netif_msg_timer(de)) 1024 if (netif_msg_timer(de))
1025 printk(KERN_INFO "%s: no link, trying media %s, status %x\n", 1025 dev_info(&dev->dev, "no link, trying media %s, status %x\n",
1026 dev->name, media_name[de->media_type], status); 1026 media_name[de->media_type], status);
1027} 1027}
1028 1028
1029static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media) 1029static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
@@ -1079,9 +1079,10 @@ static void de21041_media_timer (unsigned long data)
1079 de_link_up(de); 1079 de_link_up(de);
1080 else 1080 else
1081 if (netif_msg_timer(de)) 1081 if (netif_msg_timer(de))
1082 printk(KERN_INFO "%s: %s link ok, mode %x status %x\n", 1082 dev_info(&dev->dev,
1083 dev->name, media_name[de->media_type], 1083 "%s link ok, mode %x status %x\n",
1084 dr32(MacMode), status); 1084 media_name[de->media_type],
1085 dr32(MacMode), status);
1085 return; 1086 return;
1086 } 1087 }
1087 1088
@@ -1150,8 +1151,8 @@ no_link_yet:
1150 add_timer(&de->media_timer); 1151 add_timer(&de->media_timer);
1151 1152
1152 if (netif_msg_timer(de)) 1153 if (netif_msg_timer(de))
1153 printk(KERN_INFO "%s: no link, trying media %s, status %x\n", 1154 dev_info(&dev->dev, "no link, trying media %s, status %x\n",
1154 dev->name, media_name[de->media_type], status); 1155 media_name[de->media_type], status);
1155} 1156}
1156 1157
1157static void de_media_interrupt (struct de_private *de, u32 status) 1158static void de_media_interrupt (struct de_private *de, u32 status)
@@ -1378,8 +1379,7 @@ static int de_open (struct net_device *dev)
1378 1379
1379 rc = de_alloc_rings(de); 1380 rc = de_alloc_rings(de);
1380 if (rc) { 1381 if (rc) {
1381 printk(KERN_ERR "%s: ring allocation failure, err=%d\n", 1382 dev_err(&dev->dev, "ring allocation failure, err=%d\n", rc);
1382 dev->name, rc);
1383 return rc; 1383 return rc;
1384 } 1384 }
1385 1385
@@ -1387,15 +1387,14 @@ static int de_open (struct net_device *dev)
1387 1387
1388 rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev); 1388 rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
1389 if (rc) { 1389 if (rc) {
1390 printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n", 1390 dev_err(&dev->dev, "IRQ %d request failure, err=%d\n",
1391 dev->name, dev->irq, rc); 1391 dev->irq, rc);
1392 goto err_out_free; 1392 goto err_out_free;
1393 } 1393 }
1394 1394
1395 rc = de_init_hw(de); 1395 rc = de_init_hw(de);
1396 if (rc) { 1396 if (rc) {
1397 printk(KERN_ERR "%s: h/w init failure, err=%d\n", 1397 dev_err(&dev->dev, "h/w init failure, err=%d\n", rc);
1398 dev->name, rc);
1399 goto err_out_free_irq; 1398 goto err_out_free_irq;
1400 } 1399 }
1401 1400
@@ -1666,8 +1665,8 @@ static int de_nway_reset(struct net_device *dev)
1666 status = dr32(SIAStatus); 1665 status = dr32(SIAStatus);
1667 dw32(SIAStatus, (status & ~NWayState) | NWayRestart); 1666 dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
1668 if (netif_msg_link(de)) 1667 if (netif_msg_link(de))
1669 printk(KERN_INFO "%s: link nway restart, status %x,%x\n", 1668 dev_info(&de->dev->dev, "link nway restart, status %x,%x\n",
1670 de->dev->name, status, dr32(SIAStatus)); 1669 status, dr32(SIAStatus));
1671 return 0; 1670 return 0;
1672} 1671}
1673 1672
@@ -1711,7 +1710,7 @@ static void __devinit de21040_get_mac_address (struct de_private *de)
1711 de->dev->dev_addr[i] = value; 1710 de->dev->dev_addr[i] = value;
1712 udelay(1); 1711 udelay(1);
1713 if (boguscnt <= 0) 1712 if (boguscnt <= 0)
1714 printk(KERN_WARNING PFX "timeout reading 21040 MAC address byte %u\n", i); 1713 pr_warning(PFX "timeout reading 21040 MAC address byte %u\n", i);
1715 } 1714 }
1716} 1715}
1717 1716
@@ -1830,9 +1829,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
1830 } 1829 }
1831 1830
1832 if (netif_msg_probe(de)) 1831 if (netif_msg_probe(de))
1833 printk(KERN_INFO "de%d: SROM leaf offset %u, default media %s\n", 1832 pr_info("de%d: SROM leaf offset %u, default media %s\n",
1834 de->board_idx, ofs, 1833 de->board_idx, ofs, media_name[de->media_type]);
1835 media_name[de->media_type]);
1836 1834
1837 /* init SIA register values to defaults */ 1835 /* init SIA register values to defaults */
1838 for (i = 0; i < DE_MAX_MEDIA; i++) { 1836 for (i = 0; i < DE_MAX_MEDIA; i++) {
@@ -1879,9 +1877,9 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
1879 de->media[idx].type = idx; 1877 de->media[idx].type = idx;
1880 1878
1881 if (netif_msg_probe(de)) 1879 if (netif_msg_probe(de))
1882 printk(KERN_INFO "de%d: media block #%u: %s", 1880 pr_info("de%d: media block #%u: %s",
1883 de->board_idx, i, 1881 de->board_idx, i,
1884 media_name[de->media[idx].type]); 1882 media_name[de->media[idx].type]);
1885 1883
1886 bufp += sizeof (ib->opts); 1884 bufp += sizeof (ib->opts);
1887 1885
@@ -1893,13 +1891,13 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
1893 sizeof(ib->csr15); 1891 sizeof(ib->csr15);
1894 1892
1895 if (netif_msg_probe(de)) 1893 if (netif_msg_probe(de))
1896 printk(" (%x,%x,%x)\n", 1894 pr_cont(" (%x,%x,%x)\n",
1897 de->media[idx].csr13, 1895 de->media[idx].csr13,
1898 de->media[idx].csr14, 1896 de->media[idx].csr14,
1899 de->media[idx].csr15); 1897 de->media[idx].csr15);
1900 1898
1901 } else if (netif_msg_probe(de)) 1899 } else if (netif_msg_probe(de))
1902 printk("\n"); 1900 pr_cont("\n");
1903 1901
1904 if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3])) 1902 if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
1905 break; 1903 break;
@@ -2005,7 +2003,7 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2005 /* check for invalid IRQ value */ 2003 /* check for invalid IRQ value */
2006 if (pdev->irq < 2) { 2004 if (pdev->irq < 2) {
2007 rc = -EIO; 2005 rc = -EIO;
2008 printk(KERN_ERR PFX "invalid irq (%d) for pci dev %s\n", 2006 pr_err(PFX "invalid irq (%d) for pci dev %s\n",
2009 pdev->irq, pci_name(pdev)); 2007 pdev->irq, pci_name(pdev));
2010 goto err_out_res; 2008 goto err_out_res;
2011 } 2009 }
@@ -2016,14 +2014,14 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2016 pciaddr = pci_resource_start(pdev, 1); 2014 pciaddr = pci_resource_start(pdev, 1);
2017 if (!pciaddr) { 2015 if (!pciaddr) {
2018 rc = -EIO; 2016 rc = -EIO;
2019 printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n", 2017 pr_err(PFX "no MMIO resource for pci dev %s\n", pci_name(pdev));
2020 pci_name(pdev));
2021 goto err_out_res; 2018 goto err_out_res;
2022 } 2019 }
2023 if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) { 2020 if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
2024 rc = -EIO; 2021 rc = -EIO;
2025 printk(KERN_ERR PFX "MMIO resource (%llx) too small on pci dev %s\n", 2022 pr_err(PFX "MMIO resource (%llx) too small on pci dev %s\n",
2026 (unsigned long long)pci_resource_len(pdev, 1), pci_name(pdev)); 2023 (unsigned long long)pci_resource_len(pdev, 1),
2024 pci_name(pdev));
2027 goto err_out_res; 2025 goto err_out_res;
2028 } 2026 }
2029 2027
@@ -2031,9 +2029,9 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2031 regs = ioremap_nocache(pciaddr, DE_REGS_SIZE); 2029 regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
2032 if (!regs) { 2030 if (!regs) {
2033 rc = -EIO; 2031 rc = -EIO;
2034 printk(KERN_ERR PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n", 2032 pr_err(PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
2035 (unsigned long long)pci_resource_len(pdev, 1), 2033 (unsigned long long)pci_resource_len(pdev, 1),
2036 pciaddr, pci_name(pdev)); 2034 pciaddr, pci_name(pdev));
2037 goto err_out_res; 2035 goto err_out_res;
2038 } 2036 }
2039 dev->base_addr = (unsigned long) regs; 2037 dev->base_addr = (unsigned long) regs;
@@ -2044,8 +2042,7 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2044 /* make sure hardware is not running */ 2042 /* make sure hardware is not running */
2045 rc = de_reset_mac(de); 2043 rc = de_reset_mac(de);
2046 if (rc) { 2044 if (rc) {
2047 printk(KERN_ERR PFX "Cannot reset MAC, pci dev %s\n", 2045 pr_err(PFX "Cannot reset MAC, pci dev %s\n", pci_name(pdev));
2048 pci_name(pdev));
2049 goto err_out_iomap; 2046 goto err_out_iomap;
2050 } 2047 }
2051 2048
@@ -2065,12 +2062,11 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2065 goto err_out_iomap; 2062 goto err_out_iomap;
2066 2063
2067 /* print info about board and interface just registered */ 2064 /* print info about board and interface just registered */
2068 printk (KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n", 2065 dev_info(&dev->dev, "%s at 0x%lx, %pM, IRQ %d\n",
2069 dev->name, 2066 de->de21040 ? "21040" : "21041",
2070 de->de21040 ? "21040" : "21041", 2067 dev->base_addr,
2071 dev->base_addr, 2068 dev->dev_addr,
2072 dev->dev_addr, 2069 dev->irq);
2073 dev->irq);
2074 2070
2075 pci_set_drvdata(pdev, dev); 2071 pci_set_drvdata(pdev, dev);
2076 2072
@@ -2158,8 +2154,7 @@ static int de_resume (struct pci_dev *pdev)
2158 if (!netif_running(dev)) 2154 if (!netif_running(dev))
2159 goto out_attach; 2155 goto out_attach;
2160 if ((retval = pci_enable_device(pdev))) { 2156 if ((retval = pci_enable_device(pdev))) {
2161 printk (KERN_ERR "%s: pci_enable_device failed in resume\n", 2157 dev_err(&dev->dev, "pci_enable_device failed in resume\n");
2162 dev->name);
2163 goto out; 2158 goto out;
2164 } 2159 }
2165 de_init_hw(de); 2160 de_init_hw(de);
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index a8349b7200b5..c4ecb9a95409 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1951,9 +1951,9 @@ static void
1951SetMulticastFilter(struct net_device *dev) 1951SetMulticastFilter(struct net_device *dev)
1952{ 1952{
1953 struct de4x5_private *lp = netdev_priv(dev); 1953 struct de4x5_private *lp = netdev_priv(dev);
1954 struct dev_mc_list *dmi=dev->mc_list; 1954 struct dev_mc_list *dmi;
1955 u_long iobase = dev->base_addr; 1955 u_long iobase = dev->base_addr;
1956 int i, j, bit, byte; 1956 int i, bit, byte;
1957 u16 hashcode; 1957 u16 hashcode;
1958 u32 omr, crc; 1958 u32 omr, crc;
1959 char *pa; 1959 char *pa;
@@ -1963,12 +1963,11 @@ SetMulticastFilter(struct net_device *dev)
1963 omr &= ~(OMR_PR | OMR_PM); 1963 omr &= ~(OMR_PR | OMR_PM);
1964 pa = build_setup_frame(dev, ALL); /* Build the basic frame */ 1964 pa = build_setup_frame(dev, ALL); /* Build the basic frame */
1965 1965
1966 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 14)) { 1966 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
1967 omr |= OMR_PM; /* Pass all multicasts */ 1967 omr |= OMR_PM; /* Pass all multicasts */
1968 } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */ 1968 } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
1969 for (i=0;i<dev->mc_count;i++) { /* for each address in the list */ 1969 netdev_for_each_mc_addr(dmi, dev) {
1970 addrs=dmi->dmi_addr; 1970 addrs = dmi->dmi_addr;
1971 dmi=dmi->next;
1972 if ((*addrs & 0x01) == 1) { /* multicast address? */ 1971 if ((*addrs & 0x01) == 1) { /* multicast address? */
1973 crc = ether_crc_le(ETH_ALEN, addrs); 1972 crc = ether_crc_le(ETH_ALEN, addrs);
1974 hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */ 1973 hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */
@@ -1984,9 +1983,8 @@ SetMulticastFilter(struct net_device *dev)
1984 } 1983 }
1985 } 1984 }
1986 } else { /* Perfect filtering */ 1985 } else { /* Perfect filtering */
1987 for (j=0; j<dev->mc_count; j++) { 1986 netdev_for_each_mc_addr(dmi, dev) {
1988 addrs=dmi->dmi_addr; 1987 addrs = dmi->dmi_addr;
1989 dmi=dmi->next;
1990 for (i=0; i<ETH_ALEN; i++) { 1988 for (i=0; i<ETH_ALEN; i++) {
1991 *(pa + (i&1)) = *addrs++; 1989 *(pa + (i&1)) = *addrs++;
1992 if (i & 0x01) pa += 4; 1990 if (i & 0x01) pa += 4;
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 6f44ebf58910..95b38d803e9b 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -61,6 +61,8 @@
61 Test and make sure PCI latency is now correct for all cases. 61 Test and make sure PCI latency is now correct for all cases.
62*/ 62*/
63 63
64#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65
64#define DRV_NAME "dmfe" 66#define DRV_NAME "dmfe"
65#define DRV_VERSION "1.36.4" 67#define DRV_VERSION "1.36.4"
66#define DRV_RELDATE "2002-01-17" 68#define DRV_RELDATE "2002-01-17"
@@ -149,16 +151,17 @@
149#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */ 151#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */
150#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */ 152#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */
151 153
152#define DMFE_DBUG(dbug_now, msg, value) \ 154#define DMFE_DBUG(dbug_now, msg, value) \
153 do { \ 155 do { \
154 if (dmfe_debug || (dbug_now)) \ 156 if (dmfe_debug || (dbug_now)) \
155 printk(KERN_ERR DRV_NAME ": %s %lx\n",\ 157 pr_err("%s %lx\n", \
156 (msg), (long) (value)); \ 158 (msg), (long) (value)); \
157 } while (0) 159 } while (0)
158 160
159#define SHOW_MEDIA_TYPE(mode) \ 161#define SHOW_MEDIA_TYPE(mode) \
160 printk (KERN_INFO DRV_NAME ": Change Speed to %sMhz %s duplex\n" , \ 162 pr_info("Change Speed to %sMhz %s duplex\n" , \
161 (mode & 1) ? "100":"10", (mode & 4) ? "full":"half"); 163 (mode & 1) ? "100":"10", \
164 (mode & 4) ? "full":"half");
162 165
163 166
164/* CR9 definition: SROM/MII */ 167/* CR9 definition: SROM/MII */
@@ -327,8 +330,8 @@ static void poll_dmfe (struct net_device *dev);
327static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long); 330static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
328static void allocate_rx_buffer(struct dmfe_board_info *); 331static void allocate_rx_buffer(struct dmfe_board_info *);
329static void update_cr6(u32, unsigned long); 332static void update_cr6(u32, unsigned long);
330static void send_filter_frame(struct DEVICE * ,int); 333static void send_filter_frame(struct DEVICE *);
331static void dm9132_id_table(struct DEVICE * ,int); 334static void dm9132_id_table(struct DEVICE *);
332static u16 phy_read(unsigned long, u8, u8, u32); 335static u16 phy_read(unsigned long, u8, u8, u32);
333static void phy_write(unsigned long, u8, u8, u16, u32); 336static void phy_write(unsigned long, u8, u8, u16, u32);
334static void phy_write_1bit(unsigned long, u32); 337static void phy_write_1bit(unsigned long, u32);
@@ -391,8 +394,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
391 struct device_node *dp = pci_device_to_OF_node(pdev); 394 struct device_node *dp = pci_device_to_OF_node(pdev);
392 395
393 if (dp && of_get_property(dp, "local-mac-address", NULL)) { 396 if (dp && of_get_property(dp, "local-mac-address", NULL)) {
394 printk(KERN_INFO DRV_NAME 397 pr_info("skipping on-board DM910x (use tulip)\n");
395 ": skipping on-board DM910x (use tulip)\n");
396 return -ENODEV; 398 return -ENODEV;
397 } 399 }
398 } 400 }
@@ -405,8 +407,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
405 SET_NETDEV_DEV(dev, &pdev->dev); 407 SET_NETDEV_DEV(dev, &pdev->dev);
406 408
407 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { 409 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
408 printk(KERN_WARNING DRV_NAME 410 pr_warning("32-bit PCI DMA not available\n");
409 ": 32-bit PCI DMA not available.\n");
410 err = -ENODEV; 411 err = -ENODEV;
411 goto err_out_free; 412 goto err_out_free;
412 } 413 }
@@ -417,13 +418,13 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
417 goto err_out_free; 418 goto err_out_free;
418 419
419 if (!pci_resource_start(pdev, 0)) { 420 if (!pci_resource_start(pdev, 0)) {
420 printk(KERN_ERR DRV_NAME ": I/O base is zero\n"); 421 pr_err("I/O base is zero\n");
421 err = -ENODEV; 422 err = -ENODEV;
422 goto err_out_disable; 423 goto err_out_disable;
423 } 424 }
424 425
425 if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) { 426 if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
426 printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n"); 427 pr_err("Allocated I/O size too small\n");
427 err = -ENODEV; 428 err = -ENODEV;
428 goto err_out_disable; 429 goto err_out_disable;
429 } 430 }
@@ -438,7 +439,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
438#endif 439#endif
439 440
440 if (pci_request_regions(pdev, DRV_NAME)) { 441 if (pci_request_regions(pdev, DRV_NAME)) {
441 printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n"); 442 pr_err("Failed to request PCI regions\n");
442 err = -ENODEV; 443 err = -ENODEV;
443 goto err_out_disable; 444 goto err_out_disable;
444 } 445 }
@@ -497,12 +498,9 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
497 if (err) 498 if (err)
498 goto err_out_free_buf; 499 goto err_out_free_buf;
499 500
500 printk(KERN_INFO "%s: Davicom DM%04lx at pci%s, %pM, irq %d.\n", 501 dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
501 dev->name, 502 ent->driver_data >> 16,
502 ent->driver_data >> 16, 503 pci_name(pdev), dev->dev_addr, dev->irq);
503 pci_name(pdev),
504 dev->dev_addr,
505 dev->irq);
506 504
507 pci_set_master(pdev); 505 pci_set_master(pdev);
508 506
@@ -660,9 +658,9 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
660 658
661 /* Send setup frame */ 659 /* Send setup frame */
662 if (db->chip_id == PCI_DM9132_ID) 660 if (db->chip_id == PCI_DM9132_ID)
663 dm9132_id_table(dev, dev->mc_count); /* DM9132 */ 661 dm9132_id_table(dev); /* DM9132 */
664 else 662 else
665 send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */ 663 send_filter_frame(dev); /* DM9102/DM9102A */
666 664
667 /* Init CR7, interrupt active bit */ 665 /* Init CR7, interrupt active bit */
668 db->cr7_data = CR7_DEFAULT; 666 db->cr7_data = CR7_DEFAULT;
@@ -696,7 +694,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
696 694
697 /* Too large packet check */ 695 /* Too large packet check */
698 if (skb->len > MAX_PACKET_SIZE) { 696 if (skb->len > MAX_PACKET_SIZE) {
699 printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len); 697 pr_err("big packet = %d\n", (u16)skb->len);
700 dev_kfree_skb(skb); 698 dev_kfree_skb(skb);
701 return NETDEV_TX_OK; 699 return NETDEV_TX_OK;
702 } 700 }
@@ -706,8 +704,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
706 /* No Tx resource check, it never happen nromally */ 704 /* No Tx resource check, it never happen nromally */
707 if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) { 705 if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
708 spin_unlock_irqrestore(&db->lock, flags); 706 spin_unlock_irqrestore(&db->lock, flags);
709 printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", 707 pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
710 db->tx_queue_cnt);
711 return NETDEV_TX_BUSY; 708 return NETDEV_TX_BUSY;
712 } 709 }
713 710
@@ -779,12 +776,11 @@ static int dmfe_stop(struct DEVICE *dev)
779 776
780#if 0 777#if 0
781 /* show statistic counter */ 778 /* show statistic counter */
782 printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx" 779 printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
783 " LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n", 780 db->tx_fifo_underrun, db->tx_excessive_collision,
784 db->tx_fifo_underrun, db->tx_excessive_collision, 781 db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
785 db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier, 782 db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
786 db->tx_jabber_timeout, db->reset_count, db->reset_cr8, 783 db->reset_fatal, db->reset_TXtimeout);
787 db->reset_fatal, db->reset_TXtimeout);
788#endif 784#endif
789 785
790 return 0; 786 return 0;
@@ -885,7 +881,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
885 txptr = db->tx_remove_ptr; 881 txptr = db->tx_remove_ptr;
886 while(db->tx_packet_cnt) { 882 while(db->tx_packet_cnt) {
887 tdes0 = le32_to_cpu(txptr->tdes0); 883 tdes0 = le32_to_cpu(txptr->tdes0);
888 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */ 884 pr_debug("tdes0=%x\n", tdes0);
889 if (tdes0 & 0x80000000) 885 if (tdes0 & 0x80000000)
890 break; 886 break;
891 887
@@ -895,7 +891,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
895 891
896 /* Transmit statistic counter */ 892 /* Transmit statistic counter */
897 if ( tdes0 != 0x7fffffff ) { 893 if ( tdes0 != 0x7fffffff ) {
898 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */ 894 pr_debug("tdes0=%x\n", tdes0);
899 dev->stats.collisions += (tdes0 >> 3) & 0xf; 895 dev->stats.collisions += (tdes0 >> 3) & 0xf;
900 dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff; 896 dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
901 if (tdes0 & TDES0_ERR_MASK) { 897 if (tdes0 & TDES0_ERR_MASK) {
@@ -992,7 +988,7 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
992 /* error summary bit check */ 988 /* error summary bit check */
993 if (rdes0 & 0x8000) { 989 if (rdes0 & 0x8000) {
994 /* This is a error packet */ 990 /* This is a error packet */
995 //printk(DRV_NAME ": rdes0: %lx\n", rdes0); 991 pr_debug("rdes0: %x\n", rdes0);
996 dev->stats.rx_errors++; 992 dev->stats.rx_errors++;
997 if (rdes0 & 1) 993 if (rdes0 & 1)
998 dev->stats.rx_fifo_errors++; 994 dev->stats.rx_fifo_errors++;
@@ -1056,6 +1052,7 @@ static void dmfe_set_filter_mode(struct DEVICE * dev)
1056{ 1052{
1057 struct dmfe_board_info *db = netdev_priv(dev); 1053 struct dmfe_board_info *db = netdev_priv(dev);
1058 unsigned long flags; 1054 unsigned long flags;
1055 int mc_count = netdev_mc_count(dev);
1059 1056
1060 DMFE_DBUG(0, "dmfe_set_filter_mode()", 0); 1057 DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
1061 spin_lock_irqsave(&db->lock, flags); 1058 spin_lock_irqsave(&db->lock, flags);
@@ -1068,19 +1065,19 @@ static void dmfe_set_filter_mode(struct DEVICE * dev)
1068 return; 1065 return;
1069 } 1066 }
1070 1067
1071 if (dev->flags & IFF_ALLMULTI || dev->mc_count > DMFE_MAX_MULTICAST) { 1068 if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
1072 DMFE_DBUG(0, "Pass all multicast address", dev->mc_count); 1069 DMFE_DBUG(0, "Pass all multicast address", mc_count);
1073 db->cr6_data &= ~(CR6_PM | CR6_PBF); 1070 db->cr6_data &= ~(CR6_PM | CR6_PBF);
1074 db->cr6_data |= CR6_PAM; 1071 db->cr6_data |= CR6_PAM;
1075 spin_unlock_irqrestore(&db->lock, flags); 1072 spin_unlock_irqrestore(&db->lock, flags);
1076 return; 1073 return;
1077 } 1074 }
1078 1075
1079 DMFE_DBUG(0, "Set multicast address", dev->mc_count); 1076 DMFE_DBUG(0, "Set multicast address", mc_count);
1080 if (db->chip_id == PCI_DM9132_ID) 1077 if (db->chip_id == PCI_DM9132_ID)
1081 dm9132_id_table(dev, dev->mc_count); /* DM9132 */ 1078 dm9132_id_table(dev); /* DM9132 */
1082 else 1079 else
1083 send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */ 1080 send_filter_frame(dev); /* DM9102/DM9102A */
1084 spin_unlock_irqrestore(&db->lock, flags); 1081 spin_unlock_irqrestore(&db->lock, flags);
1085} 1082}
1086 1083
@@ -1191,8 +1188,7 @@ static void dmfe_timer(unsigned long data)
1191 if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) { 1188 if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
1192 db->reset_TXtimeout++; 1189 db->reset_TXtimeout++;
1193 db->wait_reset = 1; 1190 db->wait_reset = 1;
1194 printk(KERN_WARNING "%s: Tx timeout - resetting\n", 1191 dev_warn(&dev->dev, "Tx timeout - resetting\n");
1195 dev->name);
1196 } 1192 }
1197 } 1193 }
1198 1194
@@ -1456,7 +1452,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1456 * This setup frame initilize DM910X address filter mode 1452 * This setup frame initilize DM910X address filter mode
1457*/ 1453*/
1458 1454
1459static void dm9132_id_table(struct DEVICE *dev, int mc_cnt) 1455static void dm9132_id_table(struct DEVICE *dev)
1460{ 1456{
1461 struct dev_mc_list *mcptr; 1457 struct dev_mc_list *mcptr;
1462 u16 * addrptr; 1458 u16 * addrptr;
@@ -1476,15 +1472,14 @@ static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
1476 ioaddr += 4; 1472 ioaddr += 4;
1477 1473
1478 /* Clear Hash Table */ 1474 /* Clear Hash Table */
1479 for (i = 0; i < 4; i++) 1475 memset(hash_table, 0, sizeof(hash_table));
1480 hash_table[i] = 0x0;
1481 1476
1482 /* broadcast address */ 1477 /* broadcast address */
1483 hash_table[3] = 0x8000; 1478 hash_table[3] = 0x8000;
1484 1479
1485 /* the multicast address in Hash Table : 64 bits */ 1480 /* the multicast address in Hash Table : 64 bits */
1486 for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) { 1481 netdev_for_each_mc_addr(mcptr, dev) {
1487 hash_val = cal_CRC( (char *) mcptr->dmi_addr, 6, 0) & 0x3f; 1482 hash_val = cal_CRC((char *) mcptr->dmi_addr, 6, 0) & 0x3f;
1488 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); 1483 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1489 } 1484 }
1490 1485
@@ -1499,7 +1494,7 @@ static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
1499 * This setup frame initilize DM910X address filter mode 1494 * This setup frame initilize DM910X address filter mode
1500 */ 1495 */
1501 1496
1502static void send_filter_frame(struct DEVICE *dev, int mc_cnt) 1497static void send_filter_frame(struct DEVICE *dev)
1503{ 1498{
1504 struct dmfe_board_info *db = netdev_priv(dev); 1499 struct dmfe_board_info *db = netdev_priv(dev);
1505 struct dev_mc_list *mcptr; 1500 struct dev_mc_list *mcptr;
@@ -1525,14 +1520,14 @@ static void send_filter_frame(struct DEVICE *dev, int mc_cnt)
1525 *suptr++ = 0xffff; 1520 *suptr++ = 0xffff;
1526 1521
1527 /* fit the multicast address */ 1522 /* fit the multicast address */
1528 for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) { 1523 netdev_for_each_mc_addr(mcptr, dev) {
1529 addrptr = (u16 *) mcptr->dmi_addr; 1524 addrptr = (u16 *) mcptr->dmi_addr;
1530 *suptr++ = addrptr[0]; 1525 *suptr++ = addrptr[0];
1531 *suptr++ = addrptr[1]; 1526 *suptr++ = addrptr[1];
1532 *suptr++ = addrptr[2]; 1527 *suptr++ = addrptr[2];
1533 } 1528 }
1534 1529
1535 for (; i<14; i++) { 1530 for (i = netdev_mc_count(dev); i < 14; i++) {
1536 *suptr++ = 0xffff; 1531 *suptr++ = 0xffff;
1537 *suptr++ = 0xffff; 1532 *suptr++ = 0xffff;
1538 *suptr++ = 0xffff; 1533 *suptr++ = 0xffff;
@@ -1646,7 +1641,7 @@ static u8 dmfe_sense_speed(struct dmfe_board_info * db)
1646 else /* DM9102/DM9102A */ 1641 else /* DM9102/DM9102A */
1647 phy_mode = phy_read(db->ioaddr, 1642 phy_mode = phy_read(db->ioaddr,
1648 db->phy_addr, 17, db->chip_id) & 0xf000; 1643 db->phy_addr, 17, db->chip_id) & 0xf000;
1649 /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */ 1644 pr_debug("Phy_mode %x\n", phy_mode);
1650 switch (phy_mode) { 1645 switch (phy_mode) {
1651 case 0x1000: db->op_mode = DMFE_10MHF; break; 1646 case 0x1000: db->op_mode = DMFE_10MHF; break;
1652 case 0x2000: db->op_mode = DMFE_10MFD; break; 1647 case 0x2000: db->op_mode = DMFE_10MFD; break;
@@ -2089,7 +2084,7 @@ static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
2089 2084
2090 2085
2091 2086
2092static struct pci_device_id dmfe_pci_tbl[] = { 2087static DEFINE_PCI_DEVICE_TABLE(dmfe_pci_tbl) = {
2093 { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID }, 2088 { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
2094 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID }, 2089 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
2095 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID }, 2090 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index 889f57aae89b..93f4e8309f81 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -161,15 +161,15 @@ void __devinit tulip_parse_eeprom(struct net_device *dev)
161 if (ee_data[0] == 0xff) { 161 if (ee_data[0] == 0xff) {
162 if (last_mediatable) { 162 if (last_mediatable) {
163 controller_index++; 163 controller_index++;
164 printk(KERN_INFO "%s: Controller %d of multiport board.\n", 164 dev_info(&dev->dev,
165 dev->name, controller_index); 165 "Controller %d of multiport board\n",
166 controller_index);
166 tp->mtable = last_mediatable; 167 tp->mtable = last_mediatable;
167 ee_data = last_ee_data; 168 ee_data = last_ee_data;
168 goto subsequent_board; 169 goto subsequent_board;
169 } else 170 } else
170 printk(KERN_INFO "%s: Missing EEPROM, this interface may " 171 dev_info(&dev->dev,
171 "not work correctly!\n", 172 "Missing EEPROM, this interface may not work correctly!\n");
172 dev->name);
173 return; 173 return;
174 } 174 }
175 /* Do a fix-up based on the vendor half of the station address prefix. */ 175 /* Do a fix-up based on the vendor half of the station address prefix. */
@@ -181,16 +181,15 @@ void __devinit tulip_parse_eeprom(struct net_device *dev)
181 i++; /* An Accton EN1207, not an outlaw Maxtech. */ 181 i++; /* An Accton EN1207, not an outlaw Maxtech. */
182 memcpy(ee_data + 26, eeprom_fixups[i].newtable, 182 memcpy(ee_data + 26, eeprom_fixups[i].newtable,
183 sizeof(eeprom_fixups[i].newtable)); 183 sizeof(eeprom_fixups[i].newtable));
184 printk(KERN_INFO "%s: Old format EEPROM on '%s' board. Using" 184 dev_info(&dev->dev,
185 " substitute media control info.\n", 185 "Old format EEPROM on '%s' board. Using substitute media control info\n",
186 dev->name, eeprom_fixups[i].name); 186 eeprom_fixups[i].name);
187 break; 187 break;
188 } 188 }
189 } 189 }
190 if (eeprom_fixups[i].name == NULL) { /* No fixup found. */ 190 if (eeprom_fixups[i].name == NULL) { /* No fixup found. */
191 printk(KERN_INFO "%s: Old style EEPROM with no media selection " 191 dev_info(&dev->dev,
192 "information.\n", 192 "Old style EEPROM with no media selection information\n");
193 dev->name);
194 return; 193 return;
195 } 194 }
196 } 195 }
@@ -218,7 +217,8 @@ subsequent_board:
218 /* there is no phy information, don't even try to build mtable */ 217 /* there is no phy information, don't even try to build mtable */
219 if (count == 0) { 218 if (count == 0) {
220 if (tulip_debug > 0) 219 if (tulip_debug > 0)
221 printk(KERN_WARNING "%s: no phy info, aborting mtable build\n", dev->name); 220 dev_warn(&dev->dev,
221 "no phy info, aborting mtable build\n");
222 return; 222 return;
223 } 223 }
224 224
@@ -234,8 +234,8 @@ subsequent_board:
234 mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0; 234 mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0;
235 mtable->csr15dir = mtable->csr15val = 0; 235 mtable->csr15dir = mtable->csr15val = 0;
236 236
237 printk(KERN_INFO "%s: EEPROM default media type %s.\n", dev->name, 237 dev_info(&dev->dev, "EEPROM default media type %s\n",
238 media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]); 238 media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
239 for (i = 0; i < count; i++) { 239 for (i = 0; i < count; i++) {
240 struct medialeaf *leaf = &mtable->mleaf[i]; 240 struct medialeaf *leaf = &mtable->mleaf[i];
241 241
@@ -298,16 +298,17 @@ subsequent_board:
298 } 298 }
299 if (tulip_debug > 1 && leaf->media == 11) { 299 if (tulip_debug > 1 && leaf->media == 11) {
300 unsigned char *bp = leaf->leafdata; 300 unsigned char *bp = leaf->leafdata;
301 printk(KERN_INFO "%s: MII interface PHY %d, setup/reset " 301 dev_info(&dev->dev,
302 "sequences %d/%d long, capabilities %2.2x %2.2x.\n", 302 "MII interface PHY %d, setup/reset sequences %d/%d long, capabilities %02x %02x\n",
303 dev->name, bp[0], bp[1], bp[2 + bp[1]*2], 303 bp[0], bp[1], bp[2 + bp[1]*2],
304 bp[5 + bp[2 + bp[1]*2]*2], bp[4 + bp[2 + bp[1]*2]*2]); 304 bp[5 + bp[2 + bp[1]*2]*2],
305 bp[4 + bp[2 + bp[1]*2]*2]);
305 } 306 }
306 printk(KERN_INFO "%s: Index #%d - Media %s (#%d) described " 307 dev_info(&dev->dev,
307 "by a %s (%d) block.\n", 308 "Index #%d - Media %s (#%d) described by a %s (%d) block\n",
308 dev->name, i, medianame[leaf->media & 15], leaf->media, 309 i, medianame[leaf->media & 15], leaf->media,
309 leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>", 310 leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>",
310 leaf->type); 311 leaf->type);
311 } 312 }
312 if (new_advertise) 313 if (new_advertise)
313 tp->sym_advertise = new_advertise; 314 tp->sym_advertise = new_advertise;
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 2e8e8ee893c7..1faf7a4d7202 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -125,12 +125,12 @@ int tulip_poll(struct napi_struct *napi, int budget)
125#endif 125#endif
126 126
127 if (tulip_debug > 4) 127 if (tulip_debug > 4)
128 printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry, 128 printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
129 tp->rx_ring[entry].status); 129 entry, tp->rx_ring[entry].status);
130 130
131 do { 131 do {
132 if (ioread32(tp->base_addr + CSR5) == 0xffffffff) { 132 if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
133 printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n"); 133 printk(KERN_DEBUG " In tulip_poll(), hardware disappeared\n");
134 break; 134 break;
135 } 135 }
136 /* Acknowledge current RX interrupt sources. */ 136 /* Acknowledge current RX interrupt sources. */
@@ -146,7 +146,7 @@ int tulip_poll(struct napi_struct *napi, int budget)
146 break; 146 break;
147 147
148 if (tulip_debug > 5) 148 if (tulip_debug > 5)
149 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n", 149 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
150 dev->name, entry, status); 150 dev->name, entry, status);
151 151
152 if (++work_done >= budget) 152 if (++work_done >= budget)
@@ -177,15 +177,15 @@ int tulip_poll(struct napi_struct *napi, int budget)
177 /* Ingore earlier buffers. */ 177 /* Ingore earlier buffers. */
178 if ((status & 0xffff) != 0x7fff) { 178 if ((status & 0xffff) != 0x7fff) {
179 if (tulip_debug > 1) 179 if (tulip_debug > 1)
180 printk(KERN_WARNING "%s: Oversized Ethernet frame " 180 dev_warn(&dev->dev,
181 "spanned multiple buffers, status %8.8x!\n", 181 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
182 dev->name, status); 182 status);
183 tp->stats.rx_length_errors++; 183 tp->stats.rx_length_errors++;
184 } 184 }
185 } else { 185 } else {
186 /* There was a fatal error. */ 186 /* There was a fatal error. */
187 if (tulip_debug > 2) 187 if (tulip_debug > 2)
188 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n", 188 printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
189 dev->name, status); 189 dev->name, status);
190 tp->stats.rx_errors++; /* end of a packet.*/ 190 tp->stats.rx_errors++; /* end of a packet.*/
191 if (pkt_len > 1518 || 191 if (pkt_len > 1518 ||
@@ -226,12 +226,11 @@ int tulip_poll(struct napi_struct *napi, int budget)
226#ifndef final_version 226#ifndef final_version
227 if (tp->rx_buffers[entry].mapping != 227 if (tp->rx_buffers[entry].mapping !=
228 le32_to_cpu(tp->rx_ring[entry].buffer1)) { 228 le32_to_cpu(tp->rx_ring[entry].buffer1)) {
229 printk(KERN_ERR "%s: Internal fault: The skbuff addresses " 229 dev_err(&dev->dev,
230 "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n", 230 "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
231 dev->name, 231 le32_to_cpu(tp->rx_ring[entry].buffer1),
232 le32_to_cpu(tp->rx_ring[entry].buffer1), 232 (unsigned long long)tp->rx_buffers[entry].mapping,
233 (unsigned long long)tp->rx_buffers[entry].mapping, 233 skb->head, temp);
234 skb->head, temp);
235 } 234 }
236#endif 235#endif
237 236
@@ -365,16 +364,16 @@ static int tulip_rx(struct net_device *dev)
365 int received = 0; 364 int received = 0;
366 365
367 if (tulip_debug > 4) 366 if (tulip_debug > 4)
368 printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry, 367 printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
369 tp->rx_ring[entry].status); 368 entry, tp->rx_ring[entry].status);
370 /* If we own the next entry, it is a new packet. Send it up. */ 369 /* If we own the next entry, it is a new packet. Send it up. */
371 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { 370 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
372 s32 status = le32_to_cpu(tp->rx_ring[entry].status); 371 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
373 short pkt_len; 372 short pkt_len;
374 373
375 if (tulip_debug > 5) 374 if (tulip_debug > 5)
376 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n", 375 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
377 dev->name, entry, status); 376 dev->name, entry, status);
378 if (--rx_work_limit < 0) 377 if (--rx_work_limit < 0)
379 break; 378 break;
380 379
@@ -402,16 +401,16 @@ static int tulip_rx(struct net_device *dev)
402 /* Ingore earlier buffers. */ 401 /* Ingore earlier buffers. */
403 if ((status & 0xffff) != 0x7fff) { 402 if ((status & 0xffff) != 0x7fff) {
404 if (tulip_debug > 1) 403 if (tulip_debug > 1)
405 printk(KERN_WARNING "%s: Oversized Ethernet frame " 404 dev_warn(&dev->dev,
406 "spanned multiple buffers, status %8.8x!\n", 405 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
407 dev->name, status); 406 status);
408 tp->stats.rx_length_errors++; 407 tp->stats.rx_length_errors++;
409 } 408 }
410 } else { 409 } else {
411 /* There was a fatal error. */ 410 /* There was a fatal error. */
412 if (tulip_debug > 2) 411 if (tulip_debug > 2)
413 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n", 412 printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
414 dev->name, status); 413 dev->name, status);
415 tp->stats.rx_errors++; /* end of a packet.*/ 414 tp->stats.rx_errors++; /* end of a packet.*/
416 if (pkt_len > 1518 || 415 if (pkt_len > 1518 ||
417 (status & RxDescRunt)) 416 (status & RxDescRunt))
@@ -450,12 +449,11 @@ static int tulip_rx(struct net_device *dev)
450#ifndef final_version 449#ifndef final_version
451 if (tp->rx_buffers[entry].mapping != 450 if (tp->rx_buffers[entry].mapping !=
452 le32_to_cpu(tp->rx_ring[entry].buffer1)) { 451 le32_to_cpu(tp->rx_ring[entry].buffer1)) {
453 printk(KERN_ERR "%s: Internal fault: The skbuff addresses " 452 dev_err(&dev->dev,
454 "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n", 453 "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %Lx %p / %p\n",
455 dev->name, 454 le32_to_cpu(tp->rx_ring[entry].buffer1),
456 le32_to_cpu(tp->rx_ring[entry].buffer1), 455 (long long)tp->rx_buffers[entry].mapping,
457 (long long)tp->rx_buffers[entry].mapping, 456 skb->head, temp);
458 skb->head, temp);
459 } 457 }
460#endif 458#endif
461 459
@@ -569,7 +567,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
569#endif /* CONFIG_TULIP_NAPI */ 567#endif /* CONFIG_TULIP_NAPI */
570 568
571 if (tulip_debug > 4) 569 if (tulip_debug > 4)
572 printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n", 570 printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x\n",
573 dev->name, csr5, ioread32(ioaddr + CSR5)); 571 dev->name, csr5, ioread32(ioaddr + CSR5));
574 572
575 573
@@ -601,8 +599,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
601 /* There was an major error, log it. */ 599 /* There was an major error, log it. */
602#ifndef final_version 600#ifndef final_version
603 if (tulip_debug > 1) 601 if (tulip_debug > 1)
604 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n", 602 printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
605 dev->name, status); 603 dev->name, status);
606#endif 604#endif
607 tp->stats.tx_errors++; 605 tp->stats.tx_errors++;
608 if (status & 0x4104) tp->stats.tx_aborted_errors++; 606 if (status & 0x4104) tp->stats.tx_aborted_errors++;
@@ -631,8 +629,9 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
631 629
632#ifndef final_version 630#ifndef final_version
633 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) { 631 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
634 printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n", 632 dev_err(&dev->dev,
635 dev->name, dirty_tx, tp->cur_tx); 633 "Out-of-sync dirty pointer, %d vs. %d\n",
634 dirty_tx, tp->cur_tx);
636 dirty_tx += TX_RING_SIZE; 635 dirty_tx += TX_RING_SIZE;
637 } 636 }
638#endif 637#endif
@@ -643,9 +642,10 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
643 tp->dirty_tx = dirty_tx; 642 tp->dirty_tx = dirty_tx;
644 if (csr5 & TxDied) { 643 if (csr5 & TxDied) {
645 if (tulip_debug > 2) 644 if (tulip_debug > 2)
646 printk(KERN_WARNING "%s: The transmitter stopped." 645 dev_warn(&dev->dev,
647 " CSR5 is %x, CSR6 %x, new CSR6 %x.\n", 646 "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
648 dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6); 647 csr5, ioread32(ioaddr + CSR6),
648 tp->csr6);
649 tulip_restart_rxtx(tp); 649 tulip_restart_rxtx(tp);
650 } 650 }
651 spin_unlock(&tp->lock); 651 spin_unlock(&tp->lock);
@@ -696,8 +696,9 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
696 * to the 21142/3 docs that is). 696 * to the 21142/3 docs that is).
697 * -- rmk 697 * -- rmk
698 */ 698 */
699 printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n", 699 dev_err(&dev->dev,
700 dev->name, tp->nir, error); 700 "(%lu) System Error occurred (%d)\n",
701 tp->nir, error);
701 } 702 }
702 /* Clear all error sources, included undocumented ones! */ 703 /* Clear all error sources, included undocumented ones! */
703 iowrite32(0x0800f7ba, ioaddr + CSR5); 704 iowrite32(0x0800f7ba, ioaddr + CSR5);
@@ -706,16 +707,17 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
706 if (csr5 & TimerInt) { 707 if (csr5 & TimerInt) {
707 708
708 if (tulip_debug > 2) 709 if (tulip_debug > 2)
709 printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n", 710 dev_err(&dev->dev,
710 dev->name, csr5); 711 "Re-enabling interrupts, %08x\n",
712 csr5);
711 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7); 713 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
712 tp->ttimer = 0; 714 tp->ttimer = 0;
713 oi++; 715 oi++;
714 } 716 }
715 if (tx > maxtx || rx > maxrx || oi > maxoi) { 717 if (tx > maxtx || rx > maxrx || oi > maxoi) {
716 if (tulip_debug > 1) 718 if (tulip_debug > 1)
717 printk(KERN_WARNING "%s: Too much work during an interrupt, " 719 dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
718 "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi); 720 csr5, tp->nir, tx, rx, oi);
719 721
720 /* Acknowledge all interrupt sources. */ 722 /* Acknowledge all interrupt sources. */
721 iowrite32(0x8001ffff, ioaddr + CSR5); 723 iowrite32(0x8001ffff, ioaddr + CSR5);
@@ -764,14 +766,18 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
764 entry = tp->dirty_rx % RX_RING_SIZE; 766 entry = tp->dirty_rx % RX_RING_SIZE;
765 if (tp->rx_buffers[entry].skb == NULL) { 767 if (tp->rx_buffers[entry].skb == NULL) {
766 if (tulip_debug > 1) 768 if (tulip_debug > 1)
767 printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx); 769 dev_warn(&dev->dev,
770 "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
771 tp->nir, tp->cur_rx, tp->ttimer, rx);
768 if (tp->chip_id == LC82C168) { 772 if (tp->chip_id == LC82C168) {
769 iowrite32(0x00, ioaddr + CSR7); 773 iowrite32(0x00, ioaddr + CSR7);
770 mod_timer(&tp->timer, RUN_AT(HZ/50)); 774 mod_timer(&tp->timer, RUN_AT(HZ/50));
771 } else { 775 } else {
772 if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) { 776 if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
773 if (tulip_debug > 1) 777 if (tulip_debug > 1)
774 printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir); 778 dev_warn(&dev->dev,
779 "in rx suspend mode: (%lu) set timer\n",
780 tp->nir);
775 iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt, 781 iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
776 ioaddr + CSR7); 782 ioaddr + CSR7);
777 iowrite32(TimerInt, ioaddr + CSR5); 783 iowrite32(TimerInt, ioaddr + CSR5);
@@ -787,8 +793,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
787 } 793 }
788 794
789 if (tulip_debug > 4) 795 if (tulip_debug > 4)
790 printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n", 796 printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#04x\n",
791 dev->name, ioread32(ioaddr + CSR5)); 797 dev->name, ioread32(ioaddr + CSR5));
792 798
793 return IRQ_HANDLED; 799 return IRQ_HANDLED;
794} 800}
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index d8fda83705bf..68b170ae4d15 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -182,9 +182,8 @@ void tulip_select_media(struct net_device *dev, int startup)
182 switch (mleaf->type) { 182 switch (mleaf->type) {
183 case 0: /* 21140 non-MII xcvr. */ 183 case 0: /* 21140 non-MII xcvr. */
184 if (tulip_debug > 1) 184 if (tulip_debug > 1)
185 printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver" 185 printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver with control setting %02x\n",
186 " with control setting %2.2x.\n", 186 dev->name, p[1]);
187 dev->name, p[1]);
188 dev->if_port = p[0]; 187 dev->if_port = p[0];
189 if (startup) 188 if (startup)
190 iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12); 189 iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12);
@@ -205,15 +204,15 @@ void tulip_select_media(struct net_device *dev, int startup)
205 struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset]; 204 struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
206 unsigned char *rst = rleaf->leafdata; 205 unsigned char *rst = rleaf->leafdata;
207 if (tulip_debug > 1) 206 if (tulip_debug > 1)
208 printk(KERN_DEBUG "%s: Resetting the transceiver.\n", 207 printk(KERN_DEBUG "%s: Resetting the transceiver\n",
209 dev->name); 208 dev->name);
210 for (i = 0; i < rst[0]; i++) 209 for (i = 0; i < rst[0]; i++)
211 iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15); 210 iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
212 } 211 }
213 if (tulip_debug > 1) 212 if (tulip_debug > 1)
214 printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control " 213 printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control %04x/%04x\n",
215 "%4.4x/%4.4x.\n", 214 dev->name, medianame[dev->if_port],
216 dev->name, medianame[dev->if_port], setup[0], setup[1]); 215 setup[0], setup[1]);
217 if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. */ 216 if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. */
218 csr13val = setup[0]; 217 csr13val = setup[0];
219 csr14val = setup[1]; 218 csr14val = setup[1];
@@ -240,8 +239,8 @@ void tulip_select_media(struct net_device *dev, int startup)
240 if (startup) iowrite32(csr13val, ioaddr + CSR13); 239 if (startup) iowrite32(csr13val, ioaddr + CSR13);
241 } 240 }
242 if (tulip_debug > 1) 241 if (tulip_debug > 1)
243 printk(KERN_DEBUG "%s: Setting CSR15 to %8.8x/%8.8x.\n", 242 printk(KERN_DEBUG "%s: Setting CSR15 to %08x/%08x\n",
244 dev->name, csr15dir, csr15val); 243 dev->name, csr15dir, csr15val);
245 if (mleaf->type == 4) 244 if (mleaf->type == 4)
246 new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18); 245 new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18);
247 else 246 else
@@ -317,8 +316,9 @@ void tulip_select_media(struct net_device *dev, int startup)
317 if (tp->mii_advertise == 0) 316 if (tp->mii_advertise == 0)
318 tp->mii_advertise = tp->advertising[phy_num]; 317 tp->mii_advertise = tp->advertising[phy_num];
319 if (tulip_debug > 1) 318 if (tulip_debug > 1)
320 printk(KERN_DEBUG "%s: Advertising %4.4x on MII %d.\n", 319 printk(KERN_DEBUG "%s: Advertising %04x on MII %d\n",
321 dev->name, tp->mii_advertise, tp->phys[phy_num]); 320 dev->name, tp->mii_advertise,
321 tp->phys[phy_num]);
322 tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise); 322 tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise);
323 } 323 }
324 break; 324 break;
@@ -335,8 +335,8 @@ void tulip_select_media(struct net_device *dev, int startup)
335 struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset]; 335 struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
336 unsigned char *rst = rleaf->leafdata; 336 unsigned char *rst = rleaf->leafdata;
337 if (tulip_debug > 1) 337 if (tulip_debug > 1)
338 printk(KERN_DEBUG "%s: Resetting the transceiver.\n", 338 printk(KERN_DEBUG "%s: Resetting the transceiver\n",
339 dev->name); 339 dev->name);
340 for (i = 0; i < rst[0]; i++) 340 for (i = 0; i < rst[0]; i++)
341 iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15); 341 iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
342 } 342 }
@@ -344,20 +344,20 @@ void tulip_select_media(struct net_device *dev, int startup)
344 break; 344 break;
345 } 345 }
346 default: 346 default:
347 printk(KERN_DEBUG "%s: Invalid media table selection %d.\n", 347 printk(KERN_DEBUG "%s: Invalid media table selection %d\n",
348 dev->name, mleaf->type); 348 dev->name, mleaf->type);
349 new_csr6 = 0x020E0000; 349 new_csr6 = 0x020E0000;
350 } 350 }
351 if (tulip_debug > 1) 351 if (tulip_debug > 1)
352 printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %2.2x.\n", 352 printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %02x\n",
353 dev->name, medianame[dev->if_port], 353 dev->name, medianame[dev->if_port],
354 ioread32(ioaddr + CSR12) & 0xff); 354 ioread32(ioaddr + CSR12) & 0xff);
355 } else if (tp->chip_id == LC82C168) { 355 } else if (tp->chip_id == LC82C168) {
356 if (startup && ! tp->medialock) 356 if (startup && ! tp->medialock)
357 dev->if_port = tp->mii_cnt ? 11 : 0; 357 dev->if_port = tp->mii_cnt ? 11 : 0;
358 if (tulip_debug > 1) 358 if (tulip_debug > 1)
359 printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s.\n", 359 printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s\n",
360 dev->name, ioread32(ioaddr + 0xB8), medianame[dev->if_port]); 360 dev->name, ioread32(ioaddr + 0xB8), medianame[dev->if_port]);
361 if (tp->mii_cnt) { 361 if (tp->mii_cnt) {
362 new_csr6 = 0x810C0000; 362 new_csr6 = 0x810C0000;
363 iowrite32(0x0001, ioaddr + CSR15); 363 iowrite32(0x0001, ioaddr + CSR15);
@@ -388,10 +388,9 @@ void tulip_select_media(struct net_device *dev, int startup)
388 } else 388 } else
389 new_csr6 = 0x03860000; 389 new_csr6 = 0x03860000;
390 if (tulip_debug > 1) 390 if (tulip_debug > 1)
391 printk(KERN_DEBUG "%s: No media description table, assuming " 391 printk(KERN_DEBUG "%s: No media description table, assuming %s transceiver, CSR12 %02x\n",
392 "%s transceiver, CSR12 %2.2x.\n", 392 dev->name, medianame[dev->if_port],
393 dev->name, medianame[dev->if_port], 393 ioread32(ioaddr + CSR12));
394 ioread32(ioaddr + CSR12));
395 } 394 }
396 395
397 tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0); 396 tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0);
@@ -415,16 +414,17 @@ int tulip_check_duplex(struct net_device *dev)
415 bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR); 414 bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
416 lpa = tulip_mdio_read(dev, tp->phys[0], MII_LPA); 415 lpa = tulip_mdio_read(dev, tp->phys[0], MII_LPA);
417 if (tulip_debug > 1) 416 if (tulip_debug > 1)
418 printk(KERN_INFO "%s: MII status %4.4x, Link partner report " 417 dev_info(&dev->dev, "MII status %04x, Link partner report %04x\n",
419 "%4.4x.\n", dev->name, bmsr, lpa); 418 bmsr, lpa);
420 if (bmsr == 0xffff) 419 if (bmsr == 0xffff)
421 return -2; 420 return -2;
422 if ((bmsr & BMSR_LSTATUS) == 0) { 421 if ((bmsr & BMSR_LSTATUS) == 0) {
423 int new_bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR); 422 int new_bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
424 if ((new_bmsr & BMSR_LSTATUS) == 0) { 423 if ((new_bmsr & BMSR_LSTATUS) == 0) {
425 if (tulip_debug > 1) 424 if (tulip_debug > 1)
426 printk(KERN_INFO "%s: No link beat on the MII interface," 425 dev_info(&dev->dev,
427 " status %4.4x.\n", dev->name, new_bmsr); 426 "No link beat on the MII interface, status %04x\n",
427 new_bmsr);
428 return -1; 428 return -1;
429 } 429 }
430 } 430 }
@@ -443,10 +443,10 @@ int tulip_check_duplex(struct net_device *dev)
443 tulip_restart_rxtx(tp); 443 tulip_restart_rxtx(tp);
444 444
445 if (tulip_debug > 0) 445 if (tulip_debug > 0)
446 printk(KERN_INFO "%s: Setting %s-duplex based on MII" 446 dev_info(&dev->dev,
447 "#%d link partner capability of %4.4x.\n", 447 "Setting %s-duplex based on MII#%d link partner capability of %04x\n",
448 dev->name, tp->full_duplex ? "full" : "half", 448 tp->full_duplex ? "full" : "half",
449 tp->phys[0], lpa); 449 tp->phys[0], lpa);
450 return 1; 450 return 1;
451 } 451 }
452 452
@@ -501,15 +501,13 @@ void __devinit tulip_find_mii (struct net_device *dev, int board_idx)
501 501
502 tp->phys[phy_idx++] = phy; 502 tp->phys[phy_idx++] = phy;
503 503
504 printk (KERN_INFO "tulip%d: MII transceiver #%d " 504 pr_info("tulip%d: MII transceiver #%d config %04x status %04x advertising %04x\n",
505 "config %4.4x status %4.4x advertising %4.4x.\n",
506 board_idx, phy, mii_reg0, mii_status, mii_advert); 505 board_idx, phy, mii_reg0, mii_status, mii_advert);
507 506
508 /* Fixup for DLink with miswired PHY. */ 507 /* Fixup for DLink with miswired PHY. */
509 if (mii_advert != to_advert) { 508 if (mii_advert != to_advert) {
510 printk (KERN_DEBUG "tulip%d: Advertising %4.4x on PHY %d," 509 printk(KERN_DEBUG "tulip%d: Advertising %04x on PHY %d, previously advertising %04x\n",
511 " previously advertising %4.4x.\n", 510 board_idx, to_advert, phy, mii_advert);
512 board_idx, to_advert, phy, mii_advert);
513 tulip_mdio_write (dev, phy, 4, to_advert); 511 tulip_mdio_write (dev, phy, 4, to_advert);
514 } 512 }
515 513
@@ -554,7 +552,7 @@ void __devinit tulip_find_mii (struct net_device *dev, int board_idx)
554 } 552 }
555 tp->mii_cnt = phy_idx; 553 tp->mii_cnt = phy_idx;
556 if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) { 554 if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) {
557 printk (KERN_INFO "tulip%d: ***WARNING***: No MII transceiver found!\n", 555 pr_info("tulip%d: ***WARNING***: No MII transceiver found!\n",
558 board_idx); 556 board_idx);
559 tp->phys[0] = 1; 557 tp->phys[0] = 1;
560 } 558 }
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c
index d3253ed09dfc..966efa1a27d7 100644
--- a/drivers/net/tulip/pnic.c
+++ b/drivers/net/tulip/pnic.c
@@ -40,8 +40,8 @@ void pnic_do_nway(struct net_device *dev)
40 new_csr6 |= 0x00000200; 40 new_csr6 |= 0x00000200;
41 } 41 }
42 if (tulip_debug > 1) 42 if (tulip_debug > 1)
43 printk(KERN_DEBUG "%s: PNIC autonegotiated status %8.8x, %s.\n", 43 printk(KERN_DEBUG "%s: PNIC autonegotiated status %08x, %s\n",
44 dev->name, phy_reg, medianame[dev->if_port]); 44 dev->name, phy_reg, medianame[dev->if_port]);
45 if (tp->csr6 != new_csr6) { 45 if (tp->csr6 != new_csr6) {
46 tp->csr6 = new_csr6; 46 tp->csr6 = new_csr6;
47 /* Restart Tx */ 47 /* Restart Tx */
@@ -58,8 +58,8 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
58 int phy_reg = ioread32(ioaddr + 0xB8); 58 int phy_reg = ioread32(ioaddr + 0xB8);
59 59
60 if (tulip_debug > 1) 60 if (tulip_debug > 1)
61 printk(KERN_DEBUG "%s: PNIC link changed state %8.8x, CSR5 %8.8x.\n", 61 printk(KERN_DEBUG "%s: PNIC link changed state %08x, CSR5 %08x\n",
62 dev->name, phy_reg, csr5); 62 dev->name, phy_reg, csr5);
63 if (ioread32(ioaddr + CSR5) & TPLnkFail) { 63 if (ioread32(ioaddr + CSR5) & TPLnkFail) {
64 iowrite32((ioread32(ioaddr + CSR7) & ~TPLnkFail) | TPLnkPass, ioaddr + CSR7); 64 iowrite32((ioread32(ioaddr + CSR7) & ~TPLnkFail) | TPLnkPass, ioaddr + CSR7);
65 /* If we use an external MII, then we mustn't use the 65 /* If we use an external MII, then we mustn't use the
@@ -114,9 +114,8 @@ void pnic_timer(unsigned long data)
114 int csr5 = ioread32(ioaddr + CSR5); 114 int csr5 = ioread32(ioaddr + CSR5);
115 115
116 if (tulip_debug > 1) 116 if (tulip_debug > 1)
117 printk(KERN_DEBUG "%s: PNIC timer PHY status %8.8x, %s " 117 printk(KERN_DEBUG "%s: PNIC timer PHY status %08x, %s CSR5 %08x\n",
118 "CSR5 %8.8x.\n", 118 dev->name, phy_reg, medianame[dev->if_port], csr5);
119 dev->name, phy_reg, medianame[dev->if_port], csr5);
120 if (phy_reg & 0x04000000) { /* Remote link fault */ 119 if (phy_reg & 0x04000000) { /* Remote link fault */
121 iowrite32(0x0201F078, ioaddr + 0xB8); 120 iowrite32(0x0201F078, ioaddr + 0xB8);
122 next_tick = 1*HZ; 121 next_tick = 1*HZ;
@@ -126,10 +125,11 @@ void pnic_timer(unsigned long data)
126 next_tick = 60*HZ; 125 next_tick = 60*HZ;
127 } else if (csr5 & TPLnkFail) { /* 100baseTx link beat */ 126 } else if (csr5 & TPLnkFail) { /* 100baseTx link beat */
128 if (tulip_debug > 1) 127 if (tulip_debug > 1)
129 printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %4.4x, " 128 printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %04x, CSR5 %08x, PHY %03x\n",
130 "CSR5 %8.8x, PHY %3.3x.\n", 129 dev->name, medianame[dev->if_port],
131 dev->name, medianame[dev->if_port], csr12, 130 csr12,
132 ioread32(ioaddr + CSR5), ioread32(ioaddr + 0xB8)); 131 ioread32(ioaddr + CSR5),
132 ioread32(ioaddr + 0xB8));
133 next_tick = 3*HZ; 133 next_tick = 3*HZ;
134 if (tp->medialock) { 134 if (tp->medialock) {
135 } else if (tp->nwayset && (dev->if_port & 1)) { 135 } else if (tp->nwayset && (dev->if_port & 1)) {
@@ -151,10 +151,11 @@ void pnic_timer(unsigned long data)
151 tulip_restart_rxtx(tp); 151 tulip_restart_rxtx(tp);
152 dev->trans_start = jiffies; 152 dev->trans_start = jiffies;
153 if (tulip_debug > 1) 153 if (tulip_debug > 1)
154 printk(KERN_INFO "%s: Changing PNIC configuration to %s " 154 dev_info(&dev->dev,
155 "%s-duplex, CSR6 %8.8x.\n", 155 "Changing PNIC configuration to %s %s-duplex, CSR6 %08x\n",
156 dev->name, medianame[dev->if_port], 156 medianame[dev->if_port],
157 tp->full_duplex ? "full" : "half", new_csr6); 157 tp->full_duplex ? "full" : "half",
158 new_csr6);
158 } 159 }
159 } 160 }
160 } 161 }
@@ -162,7 +163,7 @@ too_good_connection:
162 mod_timer(&tp->timer, RUN_AT(next_tick)); 163 mod_timer(&tp->timer, RUN_AT(next_tick));
163 if(!ioread32(ioaddr + CSR7)) { 164 if(!ioread32(ioaddr + CSR7)) {
164 if (tulip_debug > 1) 165 if (tulip_debug > 1)
165 printk(KERN_INFO "%s: sw timer wakeup.\n", dev->name); 166 dev_info(&dev->dev, "sw timer wakeup\n");
166 disable_irq(dev->irq); 167 disable_irq(dev->irq);
167 tulip_refill_rx(dev); 168 tulip_refill_rx(dev);
168 enable_irq(dev->irq); 169 enable_irq(dev->irq);
diff --git a/drivers/net/tulip/pnic2.c b/drivers/net/tulip/pnic2.c
index d8418694bf46..b8197666021e 100644
--- a/drivers/net/tulip/pnic2.c
+++ b/drivers/net/tulip/pnic2.c
@@ -87,8 +87,8 @@ void pnic2_timer(unsigned long data)
87 int next_tick = 60*HZ; 87 int next_tick = 60*HZ;
88 88
89 if (tulip_debug > 3) 89 if (tulip_debug > 3)
90 printk(KERN_INFO"%s: PNIC2 negotiation status %8.8x.\n", 90 dev_info(&dev->dev, "PNIC2 negotiation status %08x\n",
91 dev->name,ioread32(ioaddr + CSR12)); 91 ioread32(ioaddr + CSR12));
92 92
93 if (next_tick) { 93 if (next_tick) {
94 mod_timer(&tp->timer, RUN_AT(next_tick)); 94 mod_timer(&tp->timer, RUN_AT(next_tick));
@@ -125,8 +125,8 @@ void pnic2_start_nway(struct net_device *dev)
125 csr14 |= 0x00001184; 125 csr14 |= 0x00001184;
126 126
127 if (tulip_debug > 1) 127 if (tulip_debug > 1)
128 printk(KERN_DEBUG "%s: Restarting PNIC2 autonegotiation, " 128 printk(KERN_DEBUG "%s: Restarting PNIC2 autonegotiation, csr14=%08x\n",
129 "csr14=%8.8x.\n", dev->name, csr14); 129 dev->name, csr14);
130 130
131 /* tell pnic2_lnk_change we are doing an nway negotiation */ 131 /* tell pnic2_lnk_change we are doing an nway negotiation */
132 dev->if_port = 0; 132 dev->if_port = 0;
@@ -137,8 +137,8 @@ void pnic2_start_nway(struct net_device *dev)
137 137
138 tp->csr6 = ioread32(ioaddr + CSR6); 138 tp->csr6 = ioread32(ioaddr + CSR6);
139 if (tulip_debug > 1) 139 if (tulip_debug > 1)
140 printk(KERN_DEBUG "%s: On Entry to Nway, " 140 printk(KERN_DEBUG "%s: On Entry to Nway, csr6=%08x\n",
141 "csr6=%8.8x.\n", dev->name, tp->csr6); 141 dev->name, tp->csr6);
142 142
143 /* mask off any bits not to touch 143 /* mask off any bits not to touch
144 * comment at top of file explains mask value 144 * comment at top of file explains mask value
@@ -181,9 +181,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
181 int csr12 = ioread32(ioaddr + CSR12); 181 int csr12 = ioread32(ioaddr + CSR12);
182 182
183 if (tulip_debug > 1) 183 if (tulip_debug > 1)
184 printk(KERN_INFO"%s: PNIC2 link status interrupt %8.8x, " 184 dev_info(&dev->dev,
185 " CSR5 %x, %8.8x.\n", dev->name, csr12, 185 "PNIC2 link status interrupt %08x, CSR5 %x, %08x\n",
186 csr5, ioread32(ioaddr + CSR14)); 186 csr12, csr5, ioread32(ioaddr + CSR14));
187 187
188 /* If NWay finished and we have a negotiated partner capability. 188 /* If NWay finished and we have a negotiated partner capability.
189 * check bits 14:12 for bit pattern 101 - all is good 189 * check bits 14:12 for bit pattern 101 - all is good
@@ -215,9 +215,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
215 else if (negotiated & 0x0020) dev->if_port = 0; 215 else if (negotiated & 0x0020) dev->if_port = 0;
216 else { 216 else {
217 if (tulip_debug > 1) 217 if (tulip_debug > 1)
218 printk(KERN_INFO "%s: funny autonegotiate result " 218 dev_info(&dev->dev,
219 "csr12 %8.8x advertising %4.4x\n", 219 "funny autonegotiate result csr12 %08x advertising %04x\n",
220 dev->name, csr12, tp->sym_advertise); 220 csr12, tp->sym_advertise);
221 tp->nwayset = 0; 221 tp->nwayset = 0;
222 /* so check if 100baseTx link state is okay */ 222 /* so check if 100baseTx link state is okay */
223 if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180)) 223 if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180))
@@ -231,10 +231,11 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
231 231
232 if (tulip_debug > 1) { 232 if (tulip_debug > 1) {
233 if (tp->nwayset) 233 if (tp->nwayset)
234 printk(KERN_INFO "%s: Switching to %s based on link " 234 dev_info(&dev->dev,
235 "negotiation %4.4x & %4.4x = %4.4x.\n", 235 "Switching to %s based on link negotiation %04x & %04x = %04x\n",
236 dev->name, medianame[dev->if_port], 236 medianame[dev->if_port],
237 tp->sym_advertise, tp->lpar, negotiated); 237 tp->sym_advertise, tp->lpar,
238 negotiated);
238 } 239 }
239 240
240 /* remember to turn off bit 7 - autonegotiate 241 /* remember to turn off bit 7 - autonegotiate
@@ -270,9 +271,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
270 iowrite32(1, ioaddr + CSR13); 271 iowrite32(1, ioaddr + CSR13);
271 272
272 if (tulip_debug > 2) 273 if (tulip_debug > 2)
273 printk(KERN_DEBUG "%s: Setting CSR6 %8.8x/%x CSR12 " 274 printk(KERN_DEBUG "%s: Setting CSR6 %08x/%x CSR12 %08x\n",
274 "%8.8x.\n", dev->name, tp->csr6, 275 dev->name, tp->csr6,
275 ioread32(ioaddr + CSR6), ioread32(ioaddr + CSR12)); 276 ioread32(ioaddr + CSR6), ioread32(ioaddr + CSR12));
276 277
277 /* now the following actually writes out the 278 /* now the following actually writes out the
278 * new csr6 values 279 * new csr6 values
@@ -282,9 +283,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
282 return; 283 return;
283 284
284 } else { 285 } else {
285 printk(KERN_INFO "%s: Autonegotiation failed, " 286 dev_info(&dev->dev,
286 "using %s, link beat status %4.4x.\n", 287 "Autonegotiation failed, using %s, link beat status %04x\n",
287 dev->name, medianame[dev->if_port], csr12); 288 medianame[dev->if_port], csr12);
288 289
289 /* remember to turn off bit 7 - autonegotiate 290 /* remember to turn off bit 7 - autonegotiate
290 * enable so we don't forget 291 * enable so we don't forget
@@ -339,9 +340,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
339 /* we are at 100mb and a potential link change occurred */ 340 /* we are at 100mb and a potential link change occurred */
340 341
341 if (tulip_debug > 1) 342 if (tulip_debug > 1)
342 printk(KERN_INFO"%s: PNIC2 %s link beat %s.\n", 343 dev_info(&dev->dev, "PNIC2 %s link beat %s\n",
343 dev->name, medianame[dev->if_port], 344 medianame[dev->if_port],
344 (csr12 & 2) ? "failed" : "good"); 345 (csr12 & 2) ? "failed" : "good");
345 346
346 /* check 100 link beat */ 347 /* check 100 link beat */
347 348
@@ -364,9 +365,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
364 /* we are at 10mb and a potential link change occurred */ 365 /* we are at 10mb and a potential link change occurred */
365 366
366 if (tulip_debug > 1) 367 if (tulip_debug > 1)
367 printk(KERN_INFO"%s: PNIC2 %s link beat %s.\n", 368 dev_info(&dev->dev, "PNIC2 %s link beat %s\n",
368 dev->name, medianame[dev->if_port], 369 medianame[dev->if_port],
369 (csr12 & 4) ? "failed" : "good"); 370 (csr12 & 4) ? "failed" : "good");
370 371
371 372
372 tp->nway = 0; 373 tp->nway = 0;
@@ -385,7 +386,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
385 386
386 387
387 if (tulip_debug > 1) 388 if (tulip_debug > 1)
388 printk(KERN_INFO"%s: PNIC2 Link Change Default?\n",dev->name); 389 dev_info(&dev->dev, "PNIC2 Link Change Default?\n");
389 390
390 /* if all else fails default to trying 10baseT-HD */ 391 /* if all else fails default to trying 10baseT-HD */
391 dev->if_port = 0; 392 dev->if_port = 0;
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c
index a0e084223082..36c2725ec886 100644
--- a/drivers/net/tulip/timer.c
+++ b/drivers/net/tulip/timer.c
@@ -28,11 +28,11 @@ void tulip_media_task(struct work_struct *work)
28 unsigned long flags; 28 unsigned long flags;
29 29
30 if (tulip_debug > 2) { 30 if (tulip_debug > 2) {
31 printk(KERN_DEBUG "%s: Media selection tick, %s, status %8.8x mode" 31 printk(KERN_DEBUG "%s: Media selection tick, %s, status %08x mode %08x SIA %08x %08x %08x %08x\n",
32 " %8.8x SIA %8.8x %8.8x %8.8x %8.8x.\n", 32 dev->name, medianame[dev->if_port],
33 dev->name, medianame[dev->if_port], ioread32(ioaddr + CSR5), 33 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR6),
34 ioread32(ioaddr + CSR6), csr12, ioread32(ioaddr + CSR13), 34 csr12, ioread32(ioaddr + CSR13),
35 ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15)); 35 ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
36 } 36 }
37 switch (tp->chip_id) { 37 switch (tp->chip_id) {
38 case DC21140: 38 case DC21140:
@@ -48,9 +48,9 @@ void tulip_media_task(struct work_struct *work)
48 Assume this a generic MII or SYM transceiver. */ 48 Assume this a generic MII or SYM transceiver. */
49 next_tick = 60*HZ; 49 next_tick = 60*HZ;
50 if (tulip_debug > 2) 50 if (tulip_debug > 2)
51 printk(KERN_DEBUG "%s: network media monitor CSR6 %8.8x " 51 printk(KERN_DEBUG "%s: network media monitor CSR6 %08x CSR12 0x%02x\n",
52 "CSR12 0x%2.2x.\n", 52 dev->name,
53 dev->name, ioread32(ioaddr + CSR6), csr12 & 0xff); 53 ioread32(ioaddr + CSR6), csr12 & 0xff);
54 break; 54 break;
55 } 55 }
56 mleaf = &tp->mtable->mleaf[tp->cur_index]; 56 mleaf = &tp->mtable->mleaf[tp->cur_index];
@@ -62,9 +62,8 @@ void tulip_media_task(struct work_struct *work)
62 s8 bitnum = p[offset]; 62 s8 bitnum = p[offset];
63 if (p[offset+1] & 0x80) { 63 if (p[offset+1] & 0x80) {
64 if (tulip_debug > 1) 64 if (tulip_debug > 1)
65 printk(KERN_DEBUG"%s: Transceiver monitor tick " 65 printk(KERN_DEBUG "%s: Transceiver monitor tick CSR12=%#02x, no media sense\n",
66 "CSR12=%#2.2x, no media sense.\n", 66 dev->name, csr12);
67 dev->name, csr12);
68 if (mleaf->type == 4) { 67 if (mleaf->type == 4) {
69 if (mleaf->media == 3 && (csr12 & 0x02)) 68 if (mleaf->media == 3 && (csr12 & 0x02))
70 goto select_next_media; 69 goto select_next_media;
@@ -72,16 +71,16 @@ void tulip_media_task(struct work_struct *work)
72 break; 71 break;
73 } 72 }
74 if (tulip_debug > 2) 73 if (tulip_debug > 2)
75 printk(KERN_DEBUG "%s: Transceiver monitor tick: CSR12=%#2.2x" 74 printk(KERN_DEBUG "%s: Transceiver monitor tick: CSR12=%#02x bit %d is %d, expecting %d\n",
76 " bit %d is %d, expecting %d.\n", 75 dev->name, csr12, (bitnum >> 1) & 7,
77 dev->name, csr12, (bitnum >> 1) & 7, 76 (csr12 & (1 << ((bitnum >> 1) & 7))) != 0,
78 (csr12 & (1 << ((bitnum >> 1) & 7))) != 0, 77 (bitnum >= 0));
79 (bitnum >= 0));
80 /* Check that the specified bit has the proper value. */ 78 /* Check that the specified bit has the proper value. */
81 if ((bitnum < 0) != 79 if ((bitnum < 0) !=
82 ((csr12 & (1 << ((bitnum >> 1) & 7))) != 0)) { 80 ((csr12 & (1 << ((bitnum >> 1) & 7))) != 0)) {
83 if (tulip_debug > 2) 81 if (tulip_debug > 2)
84 printk(KERN_DEBUG "%s: Link beat detected for %s.\n", dev->name, 82 printk(KERN_DEBUG "%s: Link beat detected for %s\n",
83 dev->name,
85 medianame[mleaf->media & MEDIA_MASK]); 84 medianame[mleaf->media & MEDIA_MASK]);
86 if ((p[2] & 0x61) == 0x01) /* Bogus Znyx board. */ 85 if ((p[2] & 0x61) == 0x01) /* Bogus Znyx board. */
87 goto actually_mii; 86 goto actually_mii;
@@ -100,9 +99,9 @@ void tulip_media_task(struct work_struct *work)
100 if (tulip_media_cap[dev->if_port] & MediaIsFD) 99 if (tulip_media_cap[dev->if_port] & MediaIsFD)
101 goto select_next_media; /* Skip FD entries. */ 100 goto select_next_media; /* Skip FD entries. */
102 if (tulip_debug > 1) 101 if (tulip_debug > 1)
103 printk(KERN_DEBUG "%s: No link beat on media %s," 102 printk(KERN_DEBUG "%s: No link beat on media %s, trying transceiver type %s\n",
104 " trying transceiver type %s.\n", 103 dev->name,
105 dev->name, medianame[mleaf->media & MEDIA_MASK], 104 medianame[mleaf->media & MEDIA_MASK],
106 medianame[tp->mtable->mleaf[tp->cur_index].media]); 105 medianame[tp->mtable->mleaf[tp->cur_index].media]);
107 tulip_select_media(dev, 0); 106 tulip_select_media(dev, 0);
108 /* Restart the transmit process. */ 107 /* Restart the transmit process. */
@@ -151,8 +150,8 @@ void mxic_timer(unsigned long data)
151 int next_tick = 60*HZ; 150 int next_tick = 60*HZ;
152 151
153 if (tulip_debug > 3) { 152 if (tulip_debug > 3) {
154 printk(KERN_INFO"%s: MXIC negotiation status %8.8x.\n", dev->name, 153 dev_info(&dev->dev, "MXIC negotiation status %08x\n",
155 ioread32(ioaddr + CSR12)); 154 ioread32(ioaddr + CSR12));
156 } 155 }
157 if (next_tick) { 156 if (next_tick) {
158 mod_timer(&tp->timer, RUN_AT(next_tick)); 157 mod_timer(&tp->timer, RUN_AT(next_tick));
@@ -167,11 +166,10 @@ void comet_timer(unsigned long data)
167 int next_tick = 60*HZ; 166 int next_tick = 60*HZ;
168 167
169 if (tulip_debug > 1) 168 if (tulip_debug > 1)
170 printk(KERN_DEBUG "%s: Comet link status %4.4x partner capability " 169 printk(KERN_DEBUG "%s: Comet link status %04x partner capability %04x\n",
171 "%4.4x.\n", 170 dev->name,
172 dev->name, 171 tulip_mdio_read(dev, tp->phys[0], 1),
173 tulip_mdio_read(dev, tp->phys[0], 1), 172 tulip_mdio_read(dev, tp->phys[0], 5));
174 tulip_mdio_read(dev, tp->phys[0], 5));
175 /* mod_timer synchronizes us with potential add_timer calls 173 /* mod_timer synchronizes us with potential add_timer calls
176 * from interrupts. 174 * from interrupts.
177 */ 175 */
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 20696b5d60a5..7f544ef2f5fc 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -41,7 +41,6 @@
41static char version[] __devinitdata = 41static char version[] __devinitdata =
42 "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n"; 42 "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
43 43
44
45/* A few user-configurable values. */ 44/* A few user-configurable values. */
46 45
47/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ 46/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
@@ -211,7 +210,7 @@ struct tulip_chip_table tulip_tbl[] = {
211}; 210};
212 211
213 212
214static struct pci_device_id tulip_pci_tbl[] = { 213static DEFINE_PCI_DEVICE_TABLE(tulip_pci_tbl) = {
215 { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 }, 214 { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
216 { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 }, 215 { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
217 { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 }, 216 { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
@@ -326,7 +325,8 @@ static void tulip_up(struct net_device *dev)
326 udelay(100); 325 udelay(100);
327 326
328 if (tulip_debug > 1) 327 if (tulip_debug > 1)
329 printk(KERN_DEBUG "%s: tulip_up(), irq==%d.\n", dev->name, dev->irq); 328 printk(KERN_DEBUG "%s: tulip_up(), irq==%d\n",
329 dev->name, dev->irq);
330 330
331 iowrite32(tp->rx_ring_dma, ioaddr + CSR3); 331 iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
332 iowrite32(tp->tx_ring_dma, ioaddr + CSR4); 332 iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
@@ -387,8 +387,9 @@ static void tulip_up(struct net_device *dev)
387 (dev->if_port == 12 ? 0 : dev->if_port); 387 (dev->if_port == 12 ? 0 : dev->if_port);
388 for (i = 0; i < tp->mtable->leafcount; i++) 388 for (i = 0; i < tp->mtable->leafcount; i++)
389 if (tp->mtable->mleaf[i].media == looking_for) { 389 if (tp->mtable->mleaf[i].media == looking_for) {
390 printk(KERN_INFO "%s: Using user-specified media %s.\n", 390 dev_info(&dev->dev,
391 dev->name, medianame[dev->if_port]); 391 "Using user-specified media %s\n",
392 medianame[dev->if_port]);
392 goto media_picked; 393 goto media_picked;
393 } 394 }
394 } 395 }
@@ -396,8 +397,9 @@ static void tulip_up(struct net_device *dev)
396 int looking_for = tp->mtable->defaultmedia & MEDIA_MASK; 397 int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
397 for (i = 0; i < tp->mtable->leafcount; i++) 398 for (i = 0; i < tp->mtable->leafcount; i++)
398 if (tp->mtable->mleaf[i].media == looking_for) { 399 if (tp->mtable->mleaf[i].media == looking_for) {
399 printk(KERN_INFO "%s: Using EEPROM-set media %s.\n", 400 dev_info(&dev->dev,
400 dev->name, medianame[looking_for]); 401 "Using EEPROM-set media %s\n",
402 medianame[looking_for]);
401 goto media_picked; 403 goto media_picked;
402 } 404 }
403 } 405 }
@@ -424,9 +426,10 @@ media_picked:
424 if (tp->mii_cnt) { 426 if (tp->mii_cnt) {
425 tulip_select_media(dev, 1); 427 tulip_select_media(dev, 1);
426 if (tulip_debug > 1) 428 if (tulip_debug > 1)
427 printk(KERN_INFO "%s: Using MII transceiver %d, status " 429 dev_info(&dev->dev,
428 "%4.4x.\n", 430 "Using MII transceiver %d, status %04x\n",
429 dev->name, tp->phys[0], tulip_mdio_read(dev, tp->phys[0], 1)); 431 tp->phys[0],
432 tulip_mdio_read(dev, tp->phys[0], 1));
430 iowrite32(csr6_mask_defstate, ioaddr + CSR6); 433 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
431 tp->csr6 = csr6_mask_hdcap; 434 tp->csr6 = csr6_mask_hdcap;
432 dev->if_port = 11; 435 dev->if_port = 11;
@@ -490,9 +493,10 @@ media_picked:
490 iowrite32(0, ioaddr + CSR2); /* Rx poll demand */ 493 iowrite32(0, ioaddr + CSR2); /* Rx poll demand */
491 494
492 if (tulip_debug > 2) { 495 if (tulip_debug > 2) {
493 printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n", 496 printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
494 dev->name, ioread32(ioaddr + CSR0), ioread32(ioaddr + CSR5), 497 dev->name, ioread32(ioaddr + CSR0),
495 ioread32(ioaddr + CSR6)); 498 ioread32(ioaddr + CSR5),
499 ioread32(ioaddr + CSR6));
496 } 500 }
497 501
498 /* Set the timer to switch to check for link beat and perhaps switch 502 /* Set the timer to switch to check for link beat and perhaps switch
@@ -540,27 +544,30 @@ static void tulip_tx_timeout(struct net_device *dev)
540 if (tulip_media_cap[dev->if_port] & MediaIsMII) { 544 if (tulip_media_cap[dev->if_port] & MediaIsMII) {
541 /* Do nothing -- the media monitor should handle this. */ 545 /* Do nothing -- the media monitor should handle this. */
542 if (tulip_debug > 1) 546 if (tulip_debug > 1)
543 printk(KERN_WARNING "%s: Transmit timeout using MII device.\n", 547 dev_warn(&dev->dev,
544 dev->name); 548 "Transmit timeout using MII device\n");
545 } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 || 549 } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
546 tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 || 550 tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
547 tp->chip_id == DM910X) { 551 tp->chip_id == DM910X) {
548 printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, " 552 dev_warn(&dev->dev,
549 "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n", 553 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
550 dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12), 554 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
551 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15)); 555 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
556 ioread32(ioaddr + CSR15));
552 tp->timeout_recovery = 1; 557 tp->timeout_recovery = 1;
553 schedule_work(&tp->media_work); 558 schedule_work(&tp->media_work);
554 goto out_unlock; 559 goto out_unlock;
555 } else if (tp->chip_id == PNIC2) { 560 } else if (tp->chip_id == PNIC2) {
556 printk(KERN_WARNING "%s: PNIC2 transmit timed out, status %8.8x, " 561 dev_warn(&dev->dev,
557 "CSR6/7 %8.8x / %8.8x CSR12 %8.8x, resetting...\n", 562 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
558 dev->name, (int)ioread32(ioaddr + CSR5), (int)ioread32(ioaddr + CSR6), 563 (int)ioread32(ioaddr + CSR5),
559 (int)ioread32(ioaddr + CSR7), (int)ioread32(ioaddr + CSR12)); 564 (int)ioread32(ioaddr + CSR6),
565 (int)ioread32(ioaddr + CSR7),
566 (int)ioread32(ioaddr + CSR12));
560 } else { 567 } else {
561 printk(KERN_WARNING "%s: Transmit timed out, status %8.8x, CSR12 " 568 dev_warn(&dev->dev,
562 "%8.8x, resetting...\n", 569 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
563 dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12)); 570 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
564 dev->if_port = 0; 571 dev->if_port = 0;
565 } 572 }
566 573
@@ -570,26 +577,26 @@ static void tulip_tx_timeout(struct net_device *dev)
570 for (i = 0; i < RX_RING_SIZE; i++) { 577 for (i = 0; i < RX_RING_SIZE; i++) {
571 u8 *buf = (u8 *)(tp->rx_ring[i].buffer1); 578 u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
572 int j; 579 int j;
573 printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x " 580 printk(KERN_DEBUG
574 "%2.2x %2.2x %2.2x.\n", 581 "%2d: %08x %08x %08x %08x %02x %02x %02x\n",
575 i, (unsigned int)tp->rx_ring[i].status, 582 i,
576 (unsigned int)tp->rx_ring[i].length, 583 (unsigned int)tp->rx_ring[i].status,
577 (unsigned int)tp->rx_ring[i].buffer1, 584 (unsigned int)tp->rx_ring[i].length,
578 (unsigned int)tp->rx_ring[i].buffer2, 585 (unsigned int)tp->rx_ring[i].buffer1,
579 buf[0], buf[1], buf[2]); 586 (unsigned int)tp->rx_ring[i].buffer2,
587 buf[0], buf[1], buf[2]);
580 for (j = 0; buf[j] != 0xee && j < 1600; j++) 588 for (j = 0; buf[j] != 0xee && j < 1600; j++)
581 if (j < 100) 589 if (j < 100)
582 printk(KERN_CONT " %2.2x", buf[j]); 590 pr_cont(" %02x", buf[j]);
583 printk(KERN_CONT " j=%d.\n", j); 591 pr_cont(" j=%d\n", j);
584 } 592 }
585 printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring); 593 printk(KERN_DEBUG " Rx ring %08x: ", (int)tp->rx_ring);
586 for (i = 0; i < RX_RING_SIZE; i++) 594 for (i = 0; i < RX_RING_SIZE; i++)
587 printk(KERN_CONT " %8.8x", 595 pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
588 (unsigned int)tp->rx_ring[i].status); 596 printk(KERN_DEBUG " Tx ring %08x: ", (int)tp->tx_ring);
589 printk(KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
590 for (i = 0; i < TX_RING_SIZE; i++) 597 for (i = 0; i < TX_RING_SIZE; i++)
591 printk(KERN_CONT " %8.8x", (unsigned int)tp->tx_ring[i].status); 598 pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
592 printk(KERN_CONT "\n"); 599 pr_cont("\n");
593 } 600 }
594#endif 601#endif
595 602
@@ -832,8 +839,9 @@ static int tulip_close (struct net_device *dev)
832 tulip_down (dev); 839 tulip_down (dev);
833 840
834 if (tulip_debug > 1) 841 if (tulip_debug > 1)
835 printk (KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n", 842 dev_printk(KERN_DEBUG, &dev->dev,
836 dev->name, ioread32 (ioaddr + CSR5)); 843 "Shutting down ethercard, status was %02x\n",
844 ioread32 (ioaddr + CSR5));
837 845
838 free_irq (dev->irq, dev); 846 free_irq (dev->irq, dev);
839 847
@@ -989,12 +997,10 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
989 memset(hash_table, 0, sizeof(hash_table)); 997 memset(hash_table, 0, sizeof(hash_table));
990 set_bit_le(255, hash_table); /* Broadcast entry */ 998 set_bit_le(255, hash_table); /* Broadcast entry */
991 /* This should work on big-endian machines as well. */ 999 /* This should work on big-endian machines as well. */
992 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 1000 netdev_for_each_mc_addr(mclist, dev) {
993 i++, mclist = mclist->next) {
994 int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff; 1001 int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
995 1002
996 set_bit_le(index, hash_table); 1003 set_bit_le(index, hash_table);
997
998 } 1004 }
999 for (i = 0; i < 32; i++) { 1005 for (i = 0; i < 32; i++) {
1000 *setup_frm++ = hash_table[i]; 1006 *setup_frm++ = hash_table[i];
@@ -1013,20 +1019,18 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
1013{ 1019{
1014 struct tulip_private *tp = netdev_priv(dev); 1020 struct tulip_private *tp = netdev_priv(dev);
1015 struct dev_mc_list *mclist; 1021 struct dev_mc_list *mclist;
1016 int i;
1017 u16 *eaddrs; 1022 u16 *eaddrs;
1018 1023
1019 /* We have <= 14 addresses so we can use the wonderful 1024 /* We have <= 14 addresses so we can use the wonderful
1020 16 address perfect filtering of the Tulip. */ 1025 16 address perfect filtering of the Tulip. */
1021 for (i = 0, mclist = dev->mc_list; i < dev->mc_count; 1026 netdev_for_each_mc_addr(mclist, dev) {
1022 i++, mclist = mclist->next) {
1023 eaddrs = (u16 *)mclist->dmi_addr; 1027 eaddrs = (u16 *)mclist->dmi_addr;
1024 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; 1028 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1025 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; 1029 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1026 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; 1030 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1027 } 1031 }
1028 /* Fill the unused entries with the broadcast address. */ 1032 /* Fill the unused entries with the broadcast address. */
1029 memset(setup_frm, 0xff, (15-i)*12); 1033 memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
1030 setup_frm = &tp->setup_frame[15*6]; 1034 setup_frm = &tp->setup_frame[15*6];
1031 1035
1032 /* Fill the final entry with our physical address. */ 1036 /* Fill the final entry with our physical address. */
@@ -1049,7 +1053,8 @@ static void set_rx_mode(struct net_device *dev)
1049 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1053 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1050 tp->csr6 |= AcceptAllMulticast | AcceptAllPhys; 1054 tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
1051 csr6 |= AcceptAllMulticast | AcceptAllPhys; 1055 csr6 |= AcceptAllMulticast | AcceptAllPhys;
1052 } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) { 1056 } else if ((netdev_mc_count(dev) > 1000) ||
1057 (dev->flags & IFF_ALLMULTI)) {
1053 /* Too many to filter well -- accept all multicasts. */ 1058 /* Too many to filter well -- accept all multicasts. */
1054 tp->csr6 |= AcceptAllMulticast; 1059 tp->csr6 |= AcceptAllMulticast;
1055 csr6 |= AcceptAllMulticast; 1060 csr6 |= AcceptAllMulticast;
@@ -1057,15 +1062,14 @@ static void set_rx_mode(struct net_device *dev)
1057 /* Some work-alikes have only a 64-entry hash filter table. */ 1062 /* Some work-alikes have only a 64-entry hash filter table. */
1058 /* Should verify correctness on big-endian/__powerpc__ */ 1063 /* Should verify correctness on big-endian/__powerpc__ */
1059 struct dev_mc_list *mclist; 1064 struct dev_mc_list *mclist;
1060 int i; 1065 if (netdev_mc_count(dev) > 64) {
1061 if (dev->mc_count > 64) { /* Arbitrary non-effective limit. */ 1066 /* Arbitrary non-effective limit. */
1062 tp->csr6 |= AcceptAllMulticast; 1067 tp->csr6 |= AcceptAllMulticast;
1063 csr6 |= AcceptAllMulticast; 1068 csr6 |= AcceptAllMulticast;
1064 } else { 1069 } else {
1065 u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */ 1070 u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
1066 int filterbit; 1071 int filterbit;
1067 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 1072 netdev_for_each_mc_addr(mclist, dev) {
1068 i++, mclist = mclist->next) {
1069 if (tp->flags & COMET_MAC_ADDR) 1073 if (tp->flags & COMET_MAC_ADDR)
1070 filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr); 1074 filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
1071 else 1075 else
@@ -1073,10 +1077,10 @@ static void set_rx_mode(struct net_device *dev)
1073 filterbit &= 0x3f; 1077 filterbit &= 0x3f;
1074 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); 1078 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1075 if (tulip_debug > 2) 1079 if (tulip_debug > 2)
1076 printk(KERN_INFO "%s: Added filter for %pM" 1080 dev_info(&dev->dev,
1077 " %8.8x bit %d.\n", 1081 "Added filter for %pM %08x bit %d\n",
1078 dev->name, mclist->dmi_addr, 1082 mclist->dmi_addr,
1079 ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit); 1083 ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
1080 } 1084 }
1081 if (mc_filter[0] == tp->mc_filter[0] && 1085 if (mc_filter[0] == tp->mc_filter[0] &&
1082 mc_filter[1] == tp->mc_filter[1]) 1086 mc_filter[1] == tp->mc_filter[1])
@@ -1099,7 +1103,8 @@ static void set_rx_mode(struct net_device *dev)
1099 1103
1100 /* Note that only the low-address shortword of setup_frame is valid! 1104 /* Note that only the low-address shortword of setup_frame is valid!
1101 The values are doubled for big-endian architectures. */ 1105 The values are doubled for big-endian architectures. */
1102 if (dev->mc_count > 14) { /* Must use a multicast hash table. */ 1106 if (netdev_mc_count(dev) > 14) {
1107 /* Must use a multicast hash table. */
1103 build_setup_frame_hash(tp->setup_frame, dev); 1108 build_setup_frame_hash(tp->setup_frame, dev);
1104 tx_flags = 0x08400000 | 192; 1109 tx_flags = 0x08400000 | 192;
1105 } else { 1110 } else {
@@ -1288,9 +1293,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1288 unsigned int force_csr0 = 0; 1293 unsigned int force_csr0 = 0;
1289 1294
1290#ifndef MODULE 1295#ifndef MODULE
1291 static int did_version; /* Already printed version info. */ 1296 if (tulip_debug > 0)
1292 if (tulip_debug > 0 && did_version++ == 0) 1297 printk_once(KERN_INFO "%s", version);
1293 printk (KERN_INFO "%s", version);
1294#endif 1298#endif
1295 1299
1296 board_idx++; 1300 board_idx++;
@@ -1301,7 +1305,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1301 */ 1305 */
1302 1306
1303 if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) { 1307 if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1304 printk (KERN_ERR PFX "skipping LMC card.\n"); 1308 pr_err(PFX "skipping LMC card\n");
1305 return -ENODEV; 1309 return -ENODEV;
1306 } 1310 }
1307 1311
@@ -1317,15 +1321,13 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1317 1321
1318 if (pdev->vendor == 0x1282 && pdev->device == 0x9100 && 1322 if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
1319 pdev->revision < 0x30) { 1323 pdev->revision < 0x30) {
1320 printk(KERN_INFO PFX 1324 pr_info(PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
1321 "skipping early DM9100 with Crc bug (use dmfe)\n");
1322 return -ENODEV; 1325 return -ENODEV;
1323 } 1326 }
1324 1327
1325 dp = pci_device_to_OF_node(pdev); 1328 dp = pci_device_to_OF_node(pdev);
1326 if (!(dp && of_get_property(dp, "local-mac-address", NULL))) { 1329 if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
1327 printk(KERN_INFO PFX 1330 pr_info(PFX "skipping DM910x expansion card (use dmfe)\n");
1328 "skipping DM910x expansion card (use dmfe)\n");
1329 return -ENODEV; 1331 return -ENODEV;
1330 } 1332 }
1331 } 1333 }
@@ -1372,9 +1374,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1372 1374
1373 i = pci_enable_device(pdev); 1375 i = pci_enable_device(pdev);
1374 if (i) { 1376 if (i) {
1375 printk (KERN_ERR PFX 1377 pr_err(PFX "Cannot enable tulip board #%d, aborting\n",
1376 "Cannot enable tulip board #%d, aborting\n", 1378 board_idx);
1377 board_idx);
1378 return i; 1379 return i;
1379 } 1380 }
1380 1381
@@ -1383,22 +1384,22 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1383 /* alloc_etherdev ensures aligned and zeroed private structures */ 1384 /* alloc_etherdev ensures aligned and zeroed private structures */
1384 dev = alloc_etherdev (sizeof (*tp)); 1385 dev = alloc_etherdev (sizeof (*tp));
1385 if (!dev) { 1386 if (!dev) {
1386 printk (KERN_ERR PFX "ether device alloc failed, aborting\n"); 1387 pr_err(PFX "ether device alloc failed, aborting\n");
1387 return -ENOMEM; 1388 return -ENOMEM;
1388 } 1389 }
1389 1390
1390 SET_NETDEV_DEV(dev, &pdev->dev); 1391 SET_NETDEV_DEV(dev, &pdev->dev);
1391 if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) { 1392 if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1392 printk (KERN_ERR PFX "%s: I/O region (0x%llx@0x%llx) too small, " 1393 pr_err(PFX "%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
1393 "aborting\n", pci_name(pdev), 1394 pci_name(pdev),
1394 (unsigned long long)pci_resource_len (pdev, 0), 1395 (unsigned long long)pci_resource_len (pdev, 0),
1395 (unsigned long long)pci_resource_start (pdev, 0)); 1396 (unsigned long long)pci_resource_start (pdev, 0));
1396 goto err_out_free_netdev; 1397 goto err_out_free_netdev;
1397 } 1398 }
1398 1399
1399 /* grab all resources from both PIO and MMIO regions, as we 1400 /* grab all resources from both PIO and MMIO regions, as we
1400 * don't want anyone else messing around with our hardware */ 1401 * don't want anyone else messing around with our hardware */
1401 if (pci_request_regions (pdev, "tulip")) 1402 if (pci_request_regions (pdev, DRV_NAME))
1402 goto err_out_free_netdev; 1403 goto err_out_free_netdev;
1403 1404
1404 ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size); 1405 ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
@@ -1611,8 +1612,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1611 if (dev->mem_start & MEDIA_MASK) 1612 if (dev->mem_start & MEDIA_MASK)
1612 tp->default_port = dev->mem_start & MEDIA_MASK; 1613 tp->default_port = dev->mem_start & MEDIA_MASK;
1613 if (tp->default_port) { 1614 if (tp->default_port) {
1614 printk(KERN_INFO "tulip%d: Transceiver selection forced to %s.\n", 1615 pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
1615 board_idx, medianame[tp->default_port & MEDIA_MASK]); 1616 board_idx, medianame[tp->default_port & MEDIA_MASK]);
1616 tp->medialock = 1; 1617 tp->medialock = 1;
1617 if (tulip_media_cap[tp->default_port] & MediaAlwaysFD) 1618 if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
1618 tp->full_duplex = 1; 1619 tp->full_duplex = 1;
@@ -1627,7 +1628,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1627 } 1628 }
1628 1629
1629 if (tp->flags & HAS_MEDIA_TABLE) { 1630 if (tp->flags & HAS_MEDIA_TABLE) {
1630 sprintf(dev->name, "tulip%d", board_idx); /* hack */ 1631 sprintf(dev->name, DRV_NAME "%d", board_idx); /* hack */
1631 tulip_parse_eeprom(dev); 1632 tulip_parse_eeprom(dev);
1632 strcpy(dev->name, "eth%d"); /* un-hack */ 1633 strcpy(dev->name, "eth%d"); /* un-hack */
1633 } 1634 }
@@ -1663,20 +1664,18 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1663 if (register_netdev(dev)) 1664 if (register_netdev(dev))
1664 goto err_out_free_ring; 1665 goto err_out_free_ring;
1665 1666
1666 printk(KERN_INFO "%s: %s rev %d at " 1667 pci_set_drvdata(pdev, dev);
1668
1669 dev_info(&dev->dev,
1667#ifdef CONFIG_TULIP_MMIO 1670#ifdef CONFIG_TULIP_MMIO
1668 "MMIO" 1671 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
1669#else 1672#else
1670 "Port" 1673 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
1671#endif 1674#endif
1672 " %#llx,", dev->name, chip_name, pdev->revision, 1675 chip_name, pdev->revision,
1673 (unsigned long long) pci_resource_start(pdev, TULIP_BAR)); 1676 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
1674 pci_set_drvdata(pdev, dev); 1677 eeprom_missing ? " EEPROM not present," : "",
1675 1678 dev->dev_addr, irq);
1676 if (eeprom_missing)
1677 printk(" EEPROM not present,");
1678 printk(" %pM", dev->dev_addr);
1679 printk(", IRQ %d.\n", irq);
1680 1679
1681 if (tp->chip_id == PNIC2) 1680 if (tp->chip_id == PNIC2)
1682 tp->link_change = pnic2_lnk_change; 1681 tp->link_change = pnic2_lnk_change;
@@ -1799,12 +1798,12 @@ static int tulip_resume(struct pci_dev *pdev)
1799 return 0; 1798 return 0;
1800 1799
1801 if ((retval = pci_enable_device(pdev))) { 1800 if ((retval = pci_enable_device(pdev))) {
1802 printk (KERN_ERR "tulip: pci_enable_device failed in resume\n"); 1801 pr_err(PFX "pci_enable_device failed in resume\n");
1803 return retval; 1802 return retval;
1804 } 1803 }
1805 1804
1806 if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) { 1805 if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
1807 printk (KERN_ERR "tulip: request_irq failed in resume\n"); 1806 pr_err(PFX "request_irq failed in resume\n");
1808 return retval; 1807 return retval;
1809 } 1808 }
1810 1809
@@ -1874,7 +1873,7 @@ static struct pci_driver tulip_driver = {
1874static int __init tulip_init (void) 1873static int __init tulip_init (void)
1875{ 1874{
1876#ifdef MODULE 1875#ifdef MODULE
1877 printk (KERN_INFO "%s", version); 1876 pr_info("%s", version);
1878#endif 1877#endif
1879 1878
1880 /* copy module parms into globals */ 1879 /* copy module parms into globals */
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index fa019cabc355..0ab05af237e5 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -12,6 +12,8 @@
12 12
13*/ 13*/
14 14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
15#define DRV_NAME "uli526x" 17#define DRV_NAME "uli526x"
16#define DRV_VERSION "0.9.3" 18#define DRV_VERSION "0.9.3"
17#define DRV_RELDATE "2005-7-29" 19#define DRV_RELDATE "2005-7-29"
@@ -82,9 +84,16 @@
82#define ULI526X_TX_TIMEOUT ((16*HZ)/2) /* tx packet time-out time 8 s" */ 84#define ULI526X_TX_TIMEOUT ((16*HZ)/2) /* tx packet time-out time 8 s" */
83#define ULI526X_TX_KICK (4*HZ/2) /* tx packet Kick-out time 2 s" */ 85#define ULI526X_TX_KICK (4*HZ/2) /* tx packet Kick-out time 2 s" */
84 86
85#define ULI526X_DBUG(dbug_now, msg, value) if (uli526x_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value)) 87#define ULI526X_DBUG(dbug_now, msg, value) \
88do { \
89 if (uli526x_debug || (dbug_now)) \
90 pr_err("%s %lx\n", (msg), (long) (value)); \
91} while (0)
86 92
87#define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change Speed to %sMhz %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half"); 93#define SHOW_MEDIA_TYPE(mode) \
94 pr_err("Change Speed to %sMhz %s duplex\n", \
95 mode & 1 ? "100" : "10", \
96 mode & 4 ? "full" : "half");
88 97
89 98
90/* CR9 definition: SROM/MII */ 99/* CR9 definition: SROM/MII */
@@ -284,7 +293,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
284 SET_NETDEV_DEV(dev, &pdev->dev); 293 SET_NETDEV_DEV(dev, &pdev->dev);
285 294
286 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { 295 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
287 printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n"); 296 pr_warning("32-bit PCI DMA not available\n");
288 err = -ENODEV; 297 err = -ENODEV;
289 goto err_out_free; 298 goto err_out_free;
290 } 299 }
@@ -295,19 +304,19 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
295 goto err_out_free; 304 goto err_out_free;
296 305
297 if (!pci_resource_start(pdev, 0)) { 306 if (!pci_resource_start(pdev, 0)) {
298 printk(KERN_ERR DRV_NAME ": I/O base is zero\n"); 307 pr_err("I/O base is zero\n");
299 err = -ENODEV; 308 err = -ENODEV;
300 goto err_out_disable; 309 goto err_out_disable;
301 } 310 }
302 311
303 if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE) ) { 312 if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE) ) {
304 printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n"); 313 pr_err("Allocated I/O size too small\n");
305 err = -ENODEV; 314 err = -ENODEV;
306 goto err_out_disable; 315 goto err_out_disable;
307 } 316 }
308 317
309 if (pci_request_regions(pdev, DRV_NAME)) { 318 if (pci_request_regions(pdev, DRV_NAME)) {
310 printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n"); 319 pr_err("Failed to request PCI regions\n");
311 err = -ENODEV; 320 err = -ENODEV;
312 goto err_out_disable; 321 goto err_out_disable;
313 } 322 }
@@ -382,9 +391,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
382 if (err) 391 if (err)
383 goto err_out_res; 392 goto err_out_res;
384 393
385 printk(KERN_INFO "%s: ULi M%04lx at pci%s, %pM, irq %d.\n", 394 dev_info(&dev->dev, "ULi M%04lx at pci%s, %pM, irq %d\n",
386 dev->name,ent->driver_data >> 16,pci_name(pdev), 395 ent->driver_data >> 16, pci_name(pdev),
387 dev->dev_addr, dev->irq); 396 dev->dev_addr, dev->irq);
388 397
389 pci_set_master(pdev); 398 pci_set_master(pdev);
390 399
@@ -516,7 +525,7 @@ static void uli526x_init(struct net_device *dev)
516 } 525 }
517 } 526 }
518 if(phy_tmp == 32) 527 if(phy_tmp == 32)
519 printk(KERN_WARNING "Can not find the phy address!!!"); 528 pr_warning("Can not find the phy address!!!");
520 /* Parser SROM and media mode */ 529 /* Parser SROM and media mode */
521 db->media_mode = uli526x_media_mode; 530 db->media_mode = uli526x_media_mode;
522 531
@@ -548,7 +557,7 @@ static void uli526x_init(struct net_device *dev)
548 update_cr6(db->cr6_data, ioaddr); 557 update_cr6(db->cr6_data, ioaddr);
549 558
550 /* Send setup frame */ 559 /* Send setup frame */
551 send_filter_frame(dev, dev->mc_count); /* M5261/M5263 */ 560 send_filter_frame(dev, netdev_mc_count(dev)); /* M5261/M5263 */
552 561
553 /* Init CR7, interrupt active bit */ 562 /* Init CR7, interrupt active bit */
554 db->cr7_data = CR7_DEFAULT; 563 db->cr7_data = CR7_DEFAULT;
@@ -582,7 +591,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
582 591
583 /* Too large packet check */ 592 /* Too large packet check */
584 if (skb->len > MAX_PACKET_SIZE) { 593 if (skb->len > MAX_PACKET_SIZE) {
585 printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len); 594 pr_err("big packet = %d\n", (u16)skb->len);
586 dev_kfree_skb(skb); 595 dev_kfree_skb(skb);
587 return NETDEV_TX_OK; 596 return NETDEV_TX_OK;
588 } 597 }
@@ -592,7 +601,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
592 /* No Tx resource check, it never happen nromally */ 601 /* No Tx resource check, it never happen nromally */
593 if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) { 602 if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) {
594 spin_unlock_irqrestore(&db->lock, flags); 603 spin_unlock_irqrestore(&db->lock, flags);
595 printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_packet_cnt); 604 pr_err("No Tx resource %ld\n", db->tx_packet_cnt);
596 return NETDEV_TX_BUSY; 605 return NETDEV_TX_BUSY;
597 } 606 }
598 607
@@ -897,16 +906,18 @@ static void uli526x_set_filter_mode(struct net_device * dev)
897 return; 906 return;
898 } 907 }
899 908
900 if (dev->flags & IFF_ALLMULTI || dev->mc_count > ULI5261_MAX_MULTICAST) { 909 if (dev->flags & IFF_ALLMULTI ||
901 ULI526X_DBUG(0, "Pass all multicast address", dev->mc_count); 910 netdev_mc_count(dev) > ULI5261_MAX_MULTICAST) {
911 ULI526X_DBUG(0, "Pass all multicast address",
912 netdev_mc_count(dev));
902 db->cr6_data &= ~(CR6_PM | CR6_PBF); 913 db->cr6_data &= ~(CR6_PM | CR6_PBF);
903 db->cr6_data |= CR6_PAM; 914 db->cr6_data |= CR6_PAM;
904 spin_unlock_irqrestore(&db->lock, flags); 915 spin_unlock_irqrestore(&db->lock, flags);
905 return; 916 return;
906 } 917 }
907 918
908 ULI526X_DBUG(0, "Set multicast address", dev->mc_count); 919 ULI526X_DBUG(0, "Set multicast address", netdev_mc_count(dev));
909 send_filter_frame(dev, dev->mc_count); /* M5261/M5263 */ 920 send_filter_frame(dev, netdev_mc_count(dev)); /* M5261/M5263 */
910 spin_unlock_irqrestore(&db->lock, flags); 921 spin_unlock_irqrestore(&db->lock, flags);
911} 922}
912 923
@@ -1058,7 +1069,7 @@ static void uli526x_timer(unsigned long data)
1058 /* Link Failed */ 1069 /* Link Failed */
1059 ULI526X_DBUG(0, "Link Failed", tmp_cr12); 1070 ULI526X_DBUG(0, "Link Failed", tmp_cr12);
1060 netif_carrier_off(dev); 1071 netif_carrier_off(dev);
1061 printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name); 1072 pr_info("%s NIC Link is Down\n",dev->name);
1062 db->link_failed = 1; 1073 db->link_failed = 1;
1063 1074
1064 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ 1075 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
@@ -1090,11 +1101,11 @@ static void uli526x_timer(unsigned long data)
1090 } 1101 }
1091 if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD) 1102 if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
1092 { 1103 {
1093 printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Full duplex\n",dev->name,TmpSpeed); 1104 pr_info("%s NIC Link is Up %d Mbps Full duplex\n",dev->name,TmpSpeed);
1094 } 1105 }
1095 else 1106 else
1096 { 1107 {
1097 printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Half duplex\n",dev->name,TmpSpeed); 1108 pr_info("%s NIC Link is Up %d Mbps Half duplex\n",dev->name,TmpSpeed);
1098 } 1109 }
1099 netif_carrier_on(dev); 1110 netif_carrier_on(dev);
1100 } 1111 }
@@ -1104,7 +1115,7 @@ static void uli526x_timer(unsigned long data)
1104 { 1115 {
1105 if(db->init==1) 1116 if(db->init==1)
1106 { 1117 {
1107 printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name); 1118 pr_info("%s NIC Link is Down\n",dev->name);
1108 netif_carrier_off(dev); 1119 netif_carrier_off(dev);
1109 } 1120 }
1110 } 1121 }
@@ -1230,8 +1241,7 @@ static int uli526x_resume(struct pci_dev *pdev)
1230 1241
1231 err = pci_set_power_state(pdev, PCI_D0); 1242 err = pci_set_power_state(pdev, PCI_D0);
1232 if (err) { 1243 if (err) {
1233 printk(KERN_WARNING "%s: Could not put device into D0\n", 1244 dev_warn(&dev->dev, "Could not put device into D0\n");
1234 dev->name);
1235 return err; 1245 return err;
1236 } 1246 }
1237 1247
@@ -1405,14 +1415,14 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
1405 *suptr++ = 0xffff << FLT_SHIFT; 1415 *suptr++ = 0xffff << FLT_SHIFT;
1406 1416
1407 /* fit the multicast address */ 1417 /* fit the multicast address */
1408 for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) { 1418 netdev_for_each_mc_addr(mcptr, dev) {
1409 addrptr = (u16 *) mcptr->dmi_addr; 1419 addrptr = (u16 *) mcptr->dmi_addr;
1410 *suptr++ = addrptr[0] << FLT_SHIFT; 1420 *suptr++ = addrptr[0] << FLT_SHIFT;
1411 *suptr++ = addrptr[1] << FLT_SHIFT; 1421 *suptr++ = addrptr[1] << FLT_SHIFT;
1412 *suptr++ = addrptr[2] << FLT_SHIFT; 1422 *suptr++ = addrptr[2] << FLT_SHIFT;
1413 } 1423 }
1414 1424
1415 for (; i<14; i++) { 1425 for (i = netdev_mc_count(dev); i < 14; i++) {
1416 *suptr++ = 0xffff << FLT_SHIFT; 1426 *suptr++ = 0xffff << FLT_SHIFT;
1417 *suptr++ = 0xffff << FLT_SHIFT; 1427 *suptr++ = 0xffff << FLT_SHIFT;
1418 *suptr++ = 0xffff << FLT_SHIFT; 1428 *suptr++ = 0xffff << FLT_SHIFT;
@@ -1432,7 +1442,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
1432 update_cr6(db->cr6_data, dev->base_addr); 1442 update_cr6(db->cr6_data, dev->base_addr);
1433 dev->trans_start = jiffies; 1443 dev->trans_start = jiffies;
1434 } else 1444 } else
1435 printk(KERN_ERR DRV_NAME ": No Tx resource - Send_filter_frame!\n"); 1445 pr_err("No Tx resource - Send_filter_frame!\n");
1436} 1446}
1437 1447
1438 1448
@@ -1783,7 +1793,7 @@ static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
1783} 1793}
1784 1794
1785 1795
1786static struct pci_device_id uli526x_pci_tbl[] = { 1796static DEFINE_PCI_DEVICE_TABLE(uli526x_pci_tbl) = {
1787 { 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID }, 1797 { 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID },
1788 { 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID }, 1798 { 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID },
1789 { 0, } 1799 { 0, }
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 869a7a0005f9..304f43866c44 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -218,7 +218,7 @@ enum chip_capability_flags {
218 CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8, 218 CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
219}; 219};
220 220
221static const struct pci_device_id w840_pci_tbl[] = { 221static DEFINE_PCI_DEVICE_TABLE(w840_pci_tbl) = {
222 { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 }, 222 { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
223 { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, 223 { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
224 { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, 224 { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
@@ -376,8 +376,8 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
376 irq = pdev->irq; 376 irq = pdev->irq;
377 377
378 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { 378 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
379 printk(KERN_WARNING "Winbond-840: Device %s disabled due to DMA limitations.\n", 379 pr_warning("Winbond-840: Device %s disabled due to DMA limitations\n",
380 pci_name(pdev)); 380 pci_name(pdev));
381 return -EIO; 381 return -EIO;
382 } 382 }
383 dev = alloc_etherdev(sizeof(*np)); 383 dev = alloc_etherdev(sizeof(*np));
@@ -422,8 +422,9 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
422 if (option & 0x200) 422 if (option & 0x200)
423 np->mii_if.full_duplex = 1; 423 np->mii_if.full_duplex = 1;
424 if (option & 15) 424 if (option & 15)
425 printk(KERN_INFO "%s: ignoring user supplied media type %d", 425 dev_info(&dev->dev,
426 dev->name, option & 15); 426 "ignoring user supplied media type %d",
427 option & 15);
427 } 428 }
428 if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0) 429 if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
429 np->mii_if.full_duplex = 1; 430 np->mii_if.full_duplex = 1;
@@ -440,9 +441,8 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
440 if (i) 441 if (i)
441 goto err_out_cleardev; 442 goto err_out_cleardev;
442 443
443 printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n", 444 dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
444 dev->name, pci_id_tbl[chip_idx].name, ioaddr, 445 pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);
445 dev->dev_addr, irq);
446 446
447 if (np->drv_flags & CanHaveMII) { 447 if (np->drv_flags & CanHaveMII) {
448 int phy, phy_idx = 0; 448 int phy, phy_idx = 0;
@@ -453,16 +453,17 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
453 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE); 453 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
454 np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+ 454 np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
455 mdio_read(dev, phy, MII_PHYSID2); 455 mdio_read(dev, phy, MII_PHYSID2);
456 printk(KERN_INFO "%s: MII PHY %8.8xh found at address %d, status " 456 dev_info(&dev->dev,
457 "0x%4.4x advertising %4.4x.\n", 457 "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
458 dev->name, np->mii, phy, mii_status, np->mii_if.advertising); 458 np->mii, phy, mii_status,
459 np->mii_if.advertising);
459 } 460 }
460 } 461 }
461 np->mii_cnt = phy_idx; 462 np->mii_cnt = phy_idx;
462 np->mii_if.phy_id = np->phys[0]; 463 np->mii_if.phy_id = np->phys[0];
463 if (phy_idx == 0) { 464 if (phy_idx == 0) {
464 printk(KERN_WARNING "%s: MII PHY not found -- this device may " 465 dev_warn(&dev->dev,
465 "not operate correctly.\n", dev->name); 466 "MII PHY not found -- this device may not operate correctly\n");
466 } 467 }
467 } 468 }
468 469
@@ -644,8 +645,8 @@ static int netdev_open(struct net_device *dev)
644 goto out_err; 645 goto out_err;
645 646
646 if (debug > 1) 647 if (debug > 1)
647 printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n", 648 printk(KERN_DEBUG "%s: w89c840_open() irq %d\n",
648 dev->name, dev->irq); 649 dev->name, dev->irq);
649 650
650 if((i=alloc_ringdesc(dev))) 651 if((i=alloc_ringdesc(dev)))
651 goto out_err; 652 goto out_err;
@@ -657,7 +658,7 @@ static int netdev_open(struct net_device *dev)
657 658
658 netif_start_queue(dev); 659 netif_start_queue(dev);
659 if (debug > 2) 660 if (debug > 2)
660 printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name); 661 printk(KERN_DEBUG "%s: Done netdev_open()\n", dev->name);
661 662
662 /* Set the timer to check for link beat. */ 663 /* Set the timer to check for link beat. */
663 init_timer(&np->timer); 664 init_timer(&np->timer);
@@ -688,16 +689,18 @@ static int update_link(struct net_device *dev)
688 if (!(mii_reg & 0x4)) { 689 if (!(mii_reg & 0x4)) {
689 if (netif_carrier_ok(dev)) { 690 if (netif_carrier_ok(dev)) {
690 if (debug) 691 if (debug)
691 printk(KERN_INFO "%s: MII #%d reports no link. Disabling watchdog.\n", 692 dev_info(&dev->dev,
692 dev->name, np->phys[0]); 693 "MII #%d reports no link. Disabling watchdog\n",
694 np->phys[0]);
693 netif_carrier_off(dev); 695 netif_carrier_off(dev);
694 } 696 }
695 return np->csr6; 697 return np->csr6;
696 } 698 }
697 if (!netif_carrier_ok(dev)) { 699 if (!netif_carrier_ok(dev)) {
698 if (debug) 700 if (debug)
699 printk(KERN_INFO "%s: MII #%d link is back. Enabling watchdog.\n", 701 dev_info(&dev->dev,
700 dev->name, np->phys[0]); 702 "MII #%d link is back. Enabling watchdog\n",
703 np->phys[0]);
701 netif_carrier_on(dev); 704 netif_carrier_on(dev);
702 } 705 }
703 706
@@ -729,9 +732,10 @@ static int update_link(struct net_device *dev)
729 if (fasteth) 732 if (fasteth)
730 result |= 0x20000000; 733 result |= 0x20000000;
731 if (result != np->csr6 && debug) 734 if (result != np->csr6 && debug)
732 printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n", 735 dev_info(&dev->dev,
733 dev->name, fasteth ? 100 : 10, 736 "Setting %dMBit-%s-duplex based on MII#%d\n",
734 duplex ? "full" : "half", np->phys[0]); 737 fasteth ? 100 : 10, duplex ? "full" : "half",
738 np->phys[0]);
735 return result; 739 return result;
736} 740}
737 741
@@ -763,8 +767,8 @@ static inline void update_csr6(struct net_device *dev, int new)
763 767
764 limit--; 768 limit--;
765 if(!limit) { 769 if(!limit) {
766 printk(KERN_INFO "%s: couldn't stop rxtx, IntrStatus %xh.\n", 770 dev_info(&dev->dev,
767 dev->name, csr5); 771 "couldn't stop rxtx, IntrStatus %xh\n", csr5);
768 break; 772 break;
769 } 773 }
770 udelay(1); 774 udelay(1);
@@ -783,10 +787,9 @@ static void netdev_timer(unsigned long data)
783 void __iomem *ioaddr = np->base_addr; 787 void __iomem *ioaddr = np->base_addr;
784 788
785 if (debug > 2) 789 if (debug > 2)
786 printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x " 790 printk(KERN_DEBUG "%s: Media selection timer tick, status %08x config %08x\n",
787 "config %8.8x.\n", 791 dev->name, ioread32(ioaddr + IntrStatus),
788 dev->name, ioread32(ioaddr + IntrStatus), 792 ioread32(ioaddr + NetworkConfig));
789 ioread32(ioaddr + NetworkConfig));
790 spin_lock_irq(&np->lock); 793 spin_lock_irq(&np->lock);
791 update_csr6(dev, update_link(dev)); 794 update_csr6(dev, update_link(dev));
792 spin_unlock_irq(&np->lock); 795 spin_unlock_irq(&np->lock);
@@ -899,8 +902,8 @@ static void init_registers(struct net_device *dev)
899 /* When not a module we can work around broken '486 PCI boards. */ 902 /* When not a module we can work around broken '486 PCI boards. */
900 if (boot_cpu_data.x86 <= 4) { 903 if (boot_cpu_data.x86 <= 4) {
901 i |= 0x4800; 904 i |= 0x4800;
902 printk(KERN_INFO "%s: This is a 386/486 PCI system, setting cache " 905 dev_info(&dev->dev,
903 "alignment to 8 longwords.\n", dev->name); 906 "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
904 } else { 907 } else {
905 i |= 0xE000; 908 i |= 0xE000;
906 } 909 }
@@ -931,22 +934,23 @@ static void tx_timeout(struct net_device *dev)
931 struct netdev_private *np = netdev_priv(dev); 934 struct netdev_private *np = netdev_priv(dev);
932 void __iomem *ioaddr = np->base_addr; 935 void __iomem *ioaddr = np->base_addr;
933 936
934 printk(KERN_WARNING "%s: Transmit timed out, status %8.8x," 937 dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
935 " resetting...\n", dev->name, ioread32(ioaddr + IntrStatus)); 938 ioread32(ioaddr + IntrStatus));
936 939
937 { 940 {
938 int i; 941 int i;
939 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring); 942 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
940 for (i = 0; i < RX_RING_SIZE; i++) 943 for (i = 0; i < RX_RING_SIZE; i++)
941 printk(" %8.8x", (unsigned int)np->rx_ring[i].status); 944 printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
942 printk(KERN_DEBUG" Tx ring %p: ", np->tx_ring); 945 printk(KERN_CONT "\n");
946 printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
943 for (i = 0; i < TX_RING_SIZE; i++) 947 for (i = 0; i < TX_RING_SIZE; i++)
944 printk(" %8.8x", np->tx_ring[i].status); 948 printk(KERN_CONT " %08x", np->tx_ring[i].status);
945 printk("\n"); 949 printk(KERN_CONT "\n");
946 } 950 }
947 printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d.\n", 951 printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
948 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes); 952 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
949 printk(KERN_DEBUG "Tx Descriptor addr %xh.\n",ioread32(ioaddr+0x4C)); 953 printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));
950 954
951 disable_irq(dev->irq); 955 disable_irq(dev->irq);
952 spin_lock_irq(&np->lock); 956 spin_lock_irq(&np->lock);
@@ -1055,8 +1059,8 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1055 dev->trans_start = jiffies; 1059 dev->trans_start = jiffies;
1056 1060
1057 if (debug > 4) { 1061 if (debug > 4) {
1058 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n", 1062 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d\n",
1059 dev->name, np->cur_tx, entry); 1063 dev->name, np->cur_tx, entry);
1060 } 1064 }
1061 return NETDEV_TX_OK; 1065 return NETDEV_TX_OK;
1062} 1066}
@@ -1073,8 +1077,8 @@ static void netdev_tx_done(struct net_device *dev)
1073 if (tx_status & 0x8000) { /* There was an error, log it. */ 1077 if (tx_status & 0x8000) { /* There was an error, log it. */
1074#ifndef final_version 1078#ifndef final_version
1075 if (debug > 1) 1079 if (debug > 1)
1076 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n", 1080 printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
1077 dev->name, tx_status); 1081 dev->name, tx_status);
1078#endif 1082#endif
1079 np->stats.tx_errors++; 1083 np->stats.tx_errors++;
1080 if (tx_status & 0x0104) np->stats.tx_aborted_errors++; 1084 if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
@@ -1086,8 +1090,8 @@ static void netdev_tx_done(struct net_device *dev)
1086 } else { 1090 } else {
1087#ifndef final_version 1091#ifndef final_version
1088 if (debug > 3) 1092 if (debug > 3)
1089 printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %8.8x.\n", 1093 printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %08x\n",
1090 dev->name, entry, tx_status); 1094 dev->name, entry, tx_status);
1091#endif 1095#endif
1092 np->stats.tx_bytes += np->tx_skbuff[entry]->len; 1096 np->stats.tx_bytes += np->tx_skbuff[entry]->len;
1093 np->stats.collisions += (tx_status >> 3) & 15; 1097 np->stats.collisions += (tx_status >> 3) & 15;
@@ -1130,8 +1134,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1130 iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus); 1134 iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);
1131 1135
1132 if (debug > 4) 1136 if (debug > 4)
1133 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", 1137 printk(KERN_DEBUG "%s: Interrupt, status %04x\n",
1134 dev->name, intr_status); 1138 dev->name, intr_status);
1135 1139
1136 if ((intr_status & (NormalIntr|AbnormalIntr)) == 0) 1140 if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
1137 break; 1141 break;
@@ -1156,8 +1160,9 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1156 netdev_error(dev, intr_status); 1160 netdev_error(dev, intr_status);
1157 1161
1158 if (--work_limit < 0) { 1162 if (--work_limit < 0) {
1159 printk(KERN_WARNING "%s: Too much work at interrupt, " 1163 dev_warn(&dev->dev,
1160 "status=0x%4.4x.\n", dev->name, intr_status); 1164 "Too much work at interrupt, status=0x%04x\n",
1165 intr_status);
1161 /* Set the timer to re-enable the other interrupts after 1166 /* Set the timer to re-enable the other interrupts after
1162 10*82usec ticks. */ 1167 10*82usec ticks. */
1163 spin_lock(&np->lock); 1168 spin_lock(&np->lock);
@@ -1171,8 +1176,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1171 } while (1); 1176 } while (1);
1172 1177
1173 if (debug > 3) 1178 if (debug > 3)
1174 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", 1179 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x\n",
1175 dev->name, ioread32(ioaddr + IntrStatus)); 1180 dev->name, ioread32(ioaddr + IntrStatus));
1176 return IRQ_RETVAL(handled); 1181 return IRQ_RETVAL(handled);
1177} 1182}
1178 1183
@@ -1185,8 +1190,8 @@ static int netdev_rx(struct net_device *dev)
1185 int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx; 1190 int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
1186 1191
1187 if (debug > 4) { 1192 if (debug > 4) {
1188 printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n", 1193 printk(KERN_DEBUG " In netdev_rx(), entry %d status %04x\n",
1189 entry, np->rx_ring[entry].status); 1194 entry, np->rx_ring[entry].status);
1190 } 1195 }
1191 1196
1192 /* If EOP is set on the next entry, it's a new packet. Send it up. */ 1197 /* If EOP is set on the next entry, it's a new packet. Send it up. */
@@ -1195,24 +1200,24 @@ static int netdev_rx(struct net_device *dev)
1195 s32 status = desc->status; 1200 s32 status = desc->status;
1196 1201
1197 if (debug > 4) 1202 if (debug > 4)
1198 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", 1203 printk(KERN_DEBUG " netdev_rx() status was %08x\n",
1199 status); 1204 status);
1200 if (status < 0) 1205 if (status < 0)
1201 break; 1206 break;
1202 if ((status & 0x38008300) != 0x0300) { 1207 if ((status & 0x38008300) != 0x0300) {
1203 if ((status & 0x38000300) != 0x0300) { 1208 if ((status & 0x38000300) != 0x0300) {
1204 /* Ingore earlier buffers. */ 1209 /* Ingore earlier buffers. */
1205 if ((status & 0xffff) != 0x7fff) { 1210 if ((status & 0xffff) != 0x7fff) {
1206 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned " 1211 dev_warn(&dev->dev,
1207 "multiple buffers, entry %#x status %4.4x!\n", 1212 "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
1208 dev->name, np->cur_rx, status); 1213 np->cur_rx, status);
1209 np->stats.rx_length_errors++; 1214 np->stats.rx_length_errors++;
1210 } 1215 }
1211 } else if (status & 0x8000) { 1216 } else if (status & 0x8000) {
1212 /* There was a fatal error. */ 1217 /* There was a fatal error. */
1213 if (debug > 2) 1218 if (debug > 2)
1214 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n", 1219 printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
1215 dev->name, status); 1220 dev->name, status);
1216 np->stats.rx_errors++; /* end of a packet.*/ 1221 np->stats.rx_errors++; /* end of a packet.*/
1217 if (status & 0x0890) np->stats.rx_length_errors++; 1222 if (status & 0x0890) np->stats.rx_length_errors++;
1218 if (status & 0x004C) np->stats.rx_frame_errors++; 1223 if (status & 0x004C) np->stats.rx_frame_errors++;
@@ -1225,8 +1230,8 @@ static int netdev_rx(struct net_device *dev)
1225 1230
1226#ifndef final_version 1231#ifndef final_version
1227 if (debug > 4) 1232 if (debug > 4)
1228 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d" 1233 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d status %x\n",
1229 " status %x.\n", pkt_len, status); 1234 pkt_len, status);
1230#endif 1235#endif
1231 /* Check if the packet is long enough to accept without copying 1236 /* Check if the packet is long enough to accept without copying
1232 to a minimally-sized skbuff. */ 1237 to a minimally-sized skbuff. */
@@ -1251,11 +1256,10 @@ static int netdev_rx(struct net_device *dev)
1251#ifndef final_version /* Remove after testing. */ 1256#ifndef final_version /* Remove after testing. */
1252 /* You will want this info for the initial debug. */ 1257 /* You will want this info for the initial debug. */
1253 if (debug > 5) 1258 if (debug > 5)
1254 printk(KERN_DEBUG " Rx data %pM %pM" 1259 printk(KERN_DEBUG " Rx data %pM %pM %02x%02x %pI4\n",
1255 " %2.2x%2.2x %d.%d.%d.%d.\n",
1256 &skb->data[0], &skb->data[6], 1260 &skb->data[0], &skb->data[6],
1257 skb->data[12], skb->data[13], 1261 skb->data[12], skb->data[13],
1258 skb->data[14], skb->data[15], skb->data[16], skb->data[17]); 1262 &skb->data[14]);
1259#endif 1263#endif
1260 skb->protocol = eth_type_trans(skb, dev); 1264 skb->protocol = eth_type_trans(skb, dev);
1261 netif_rx(skb); 1265 netif_rx(skb);
@@ -1293,8 +1297,8 @@ static void netdev_error(struct net_device *dev, int intr_status)
1293 void __iomem *ioaddr = np->base_addr; 1297 void __iomem *ioaddr = np->base_addr;
1294 1298
1295 if (debug > 2) 1299 if (debug > 2)
1296 printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n", 1300 printk(KERN_DEBUG "%s: Abnormal event, %08x\n",
1297 dev->name, intr_status); 1301 dev->name, intr_status);
1298 if (intr_status == 0xffffffff) 1302 if (intr_status == 0xffffffff)
1299 return; 1303 return;
1300 spin_lock(&np->lock); 1304 spin_lock(&np->lock);
@@ -1314,8 +1318,8 @@ static void netdev_error(struct net_device *dev, int intr_status)
1314 new = 127; /* load full packet before starting */ 1318 new = 127; /* load full packet before starting */
1315 new = (np->csr6 & ~(0x7F << 14)) | (new<<14); 1319 new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
1316#endif 1320#endif
1317 printk(KERN_DEBUG "%s: Tx underflow, new csr6 %8.8x.\n", 1321 printk(KERN_DEBUG "%s: Tx underflow, new csr6 %08x\n",
1318 dev->name, new); 1322 dev->name, new);
1319 update_csr6(dev, new); 1323 update_csr6(dev, new);
1320 } 1324 }
1321 if (intr_status & RxDied) { /* Missed a Rx frame. */ 1325 if (intr_status & RxDied) { /* Missed a Rx frame. */
@@ -1357,17 +1361,16 @@ static u32 __set_rx_mode(struct net_device *dev)
1357 memset(mc_filter, 0xff, sizeof(mc_filter)); 1361 memset(mc_filter, 0xff, sizeof(mc_filter));
1358 rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys 1362 rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
1359 | AcceptMyPhys; 1363 | AcceptMyPhys;
1360 } else if ((dev->mc_count > multicast_filter_limit) || 1364 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1361 (dev->flags & IFF_ALLMULTI)) { 1365 (dev->flags & IFF_ALLMULTI)) {
1362 /* Too many to match, or accept all multicasts. */ 1366 /* Too many to match, or accept all multicasts. */
1363 memset(mc_filter, 0xff, sizeof(mc_filter)); 1367 memset(mc_filter, 0xff, sizeof(mc_filter));
1364 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys; 1368 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1365 } else { 1369 } else {
1366 struct dev_mc_list *mclist; 1370 struct dev_mc_list *mclist;
1367 int i; 1371
1368 memset(mc_filter, 0, sizeof(mc_filter)); 1372 memset(mc_filter, 0, sizeof(mc_filter));
1369 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 1373 netdev_for_each_mc_addr(mclist, dev) {
1370 i++, mclist = mclist->next) {
1371 int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F; 1374 int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
1372 filterbit &= 0x3f; 1375 filterbit &= 0x3f;
1373 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); 1376 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
@@ -1487,11 +1490,13 @@ static int netdev_close(struct net_device *dev)
1487 netif_stop_queue(dev); 1490 netif_stop_queue(dev);
1488 1491
1489 if (debug > 1) { 1492 if (debug > 1) {
1490 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x " 1493 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %08x Config %08x\n",
1491 "Config %8.8x.\n", dev->name, ioread32(ioaddr + IntrStatus), 1494 dev->name, ioread32(ioaddr + IntrStatus),
1492 ioread32(ioaddr + NetworkConfig)); 1495 ioread32(ioaddr + NetworkConfig));
1493 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n", 1496 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d\n",
1494 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx); 1497 dev->name,
1498 np->cur_tx, np->dirty_tx,
1499 np->cur_rx, np->dirty_rx);
1495 } 1500 }
1496 1501
1497 /* Stop the chip's Tx and Rx processes. */ 1502 /* Stop the chip's Tx and Rx processes. */
@@ -1512,18 +1517,16 @@ static int netdev_close(struct net_device *dev)
1512 if (debug > 2) { 1517 if (debug > 2) {
1513 int i; 1518 int i;
1514 1519
1515 printk(KERN_DEBUG" Tx ring at %8.8x:\n", 1520 printk(KERN_DEBUG" Tx ring at %08x:\n", (int)np->tx_ring);
1516 (int)np->tx_ring);
1517 for (i = 0; i < TX_RING_SIZE; i++) 1521 for (i = 0; i < TX_RING_SIZE; i++)
1518 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x.\n", 1522 printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
1519 i, np->tx_ring[i].length, 1523 i, np->tx_ring[i].length,
1520 np->tx_ring[i].status, np->tx_ring[i].buffer1); 1524 np->tx_ring[i].status, np->tx_ring[i].buffer1);
1521 printk(KERN_DEBUG " Rx ring %8.8x:\n", 1525 printk(KERN_DEBUG " Rx ring %08x:\n", (int)np->rx_ring);
1522 (int)np->rx_ring);
1523 for (i = 0; i < RX_RING_SIZE; i++) { 1526 for (i = 0; i < RX_RING_SIZE; i++) {
1524 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n", 1527 printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
1525 i, np->rx_ring[i].length, 1528 i, np->rx_ring[i].length,
1526 np->rx_ring[i].status, np->rx_ring[i].buffer1); 1529 np->rx_ring[i].status, np->rx_ring[i].buffer1);
1527 } 1530 }
1528 } 1531 }
1529#endif /* __i386__ debugging only */ 1532#endif /* __i386__ debugging only */
@@ -1622,9 +1625,8 @@ static int w840_resume (struct pci_dev *pdev)
1622 goto out; /* device not suspended */ 1625 goto out; /* device not suspended */
1623 if (netif_running(dev)) { 1626 if (netif_running(dev)) {
1624 if ((retval = pci_enable_device(pdev))) { 1627 if ((retval = pci_enable_device(pdev))) {
1625 printk (KERN_ERR 1628 dev_err(&dev->dev,
1626 "%s: pci_enable_device failed in resume\n", 1629 "pci_enable_device failed in resume\n");
1627 dev->name);
1628 goto out; 1630 goto out;
1629 } 1631 }
1630 spin_lock_irq(&np->lock); 1632 spin_lock_irq(&np->lock);
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 9924c4c7e2d6..acfeeb980562 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -14,6 +14,8 @@
14 * $Id: xircom_cb.c,v 1.33 2001/03/19 14:02:07 arjanv Exp $ 14 * $Id: xircom_cb.c,v 1.33 2001/03/19 14:02:07 arjanv Exp $
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <linux/module.h> 19#include <linux/module.h>
18#include <linux/kernel.h> 20#include <linux/kernel.h>
19#include <linux/string.h> 21#include <linux/string.h>
@@ -144,7 +146,7 @@ static int link_status(struct xircom_private *card);
144 146
145 147
146 148
147static struct pci_device_id xircom_pci_table[] = { 149static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = {
148 {0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,}, 150 {0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,},
149 {0,}, 151 {0,},
150}; 152};
@@ -234,7 +236,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
234 pci_write_config_word (pdev, PCI_STATUS,tmp16); 236 pci_write_config_word (pdev, PCI_STATUS,tmp16);
235 237
236 if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) { 238 if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) {
237 printk(KERN_ERR "xircom_probe: failed to allocate io-region\n"); 239 pr_err("%s: failed to allocate io-region\n", __func__);
238 return -ENODEV; 240 return -ENODEV;
239 } 241 }
240 242
@@ -245,7 +247,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
245 */ 247 */
246 dev = alloc_etherdev(sizeof(struct xircom_private)); 248 dev = alloc_etherdev(sizeof(struct xircom_private));
247 if (!dev) { 249 if (!dev) {
248 printk(KERN_ERR "xircom_probe: failed to allocate etherdev\n"); 250 pr_err("%s: failed to allocate etherdev\n", __func__);
249 goto device_fail; 251 goto device_fail;
250 } 252 }
251 private = netdev_priv(dev); 253 private = netdev_priv(dev);
@@ -253,12 +255,12 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
253 /* Allocate the send/receive buffers */ 255 /* Allocate the send/receive buffers */
254 private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle); 256 private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle);
255 if (private->rx_buffer == NULL) { 257 if (private->rx_buffer == NULL) {
256 printk(KERN_ERR "xircom_probe: no memory for rx buffer \n"); 258 pr_err("%s: no memory for rx buffer\n", __func__);
257 goto rx_buf_fail; 259 goto rx_buf_fail;
258 } 260 }
259 private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle); 261 private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle);
260 if (private->tx_buffer == NULL) { 262 if (private->tx_buffer == NULL) {
261 printk(KERN_ERR "xircom_probe: no memory for tx buffer \n"); 263 pr_err("%s: no memory for tx buffer\n", __func__);
262 goto tx_buf_fail; 264 goto tx_buf_fail;
263 } 265 }
264 266
@@ -281,11 +283,12 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
281 pci_set_drvdata(pdev, dev); 283 pci_set_drvdata(pdev, dev);
282 284
283 if (register_netdev(dev)) { 285 if (register_netdev(dev)) {
284 printk(KERN_ERR "xircom_probe: netdevice registration failed.\n"); 286 pr_err("%s: netdevice registration failed\n", __func__);
285 goto reg_fail; 287 goto reg_fail;
286 } 288 }
287 289
288 printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, pdev->revision, pdev->irq); 290 dev_info(&dev->dev, "Xircom cardbus revision %i at irq %i\n",
291 pdev->revision, pdev->irq);
289 /* start the transmitter to get a heartbeat */ 292 /* start the transmitter to get a heartbeat */
290 /* TODO: send 2 dummy packets here */ 293 /* TODO: send 2 dummy packets here */
291 transceiver_voodoo(private); 294 transceiver_voodoo(private);
@@ -347,8 +350,10 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
347 350
348#ifdef DEBUG 351#ifdef DEBUG
349 print_binary(status); 352 print_binary(status);
350 printk("tx status 0x%08x 0x%08x \n",card->tx_buffer[0],card->tx_buffer[4]); 353 printk("tx status 0x%08x 0x%08x \n",
351 printk("rx status 0x%08x 0x%08x \n",card->rx_buffer[0],card->rx_buffer[4]); 354 card->tx_buffer[0], card->tx_buffer[4]);
355 printk("rx status 0x%08x 0x%08x \n",
356 card->rx_buffer[0], card->rx_buffer[4]);
352#endif 357#endif
353 /* Handle shared irq and hotplug */ 358 /* Handle shared irq and hotplug */
354 if (status == 0 || status == 0xffffffff) { 359 if (status == 0 || status == 0xffffffff) {
@@ -358,9 +363,9 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
358 363
359 if (link_status_changed(card)) { 364 if (link_status_changed(card)) {
360 int newlink; 365 int newlink;
361 printk(KERN_DEBUG "xircom_cb: Link status has changed \n"); 366 printk(KERN_DEBUG "xircom_cb: Link status has changed\n");
362 newlink = link_status(card); 367 newlink = link_status(card);
363 printk(KERN_INFO "xircom_cb: Link is %i mbit \n",newlink); 368 dev_info(&dev->dev, "Link is %i mbit\n", newlink);
364 if (newlink) 369 if (newlink)
365 netif_carrier_on(dev); 370 netif_carrier_on(dev);
366 else 371 else
@@ -457,7 +462,8 @@ static int xircom_open(struct net_device *dev)
457 struct xircom_private *xp = netdev_priv(dev); 462 struct xircom_private *xp = netdev_priv(dev);
458 int retval; 463 int retval;
459 enter("xircom_open"); 464 enter("xircom_open");
460 printk(KERN_INFO "xircom cardbus adaptor found, registering as %s, using irq %i \n",dev->name,dev->irq); 465 pr_info("xircom cardbus adaptor found, registering as %s, using irq %i \n",
466 dev->name, dev->irq);
461 retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev); 467 retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
462 if (retval) { 468 if (retval) {
463 leave("xircom_open - No IRQ"); 469 leave("xircom_open - No IRQ");
@@ -770,7 +776,7 @@ static void activate_receiver(struct xircom_private *card)
770 udelay(50); 776 udelay(50);
771 counter--; 777 counter--;
772 if (counter <= 0) 778 if (counter <= 0)
773 printk(KERN_ERR "xircom_cb: Receiver failed to deactivate\n"); 779 pr_err("Receiver failed to deactivate\n");
774 } 780 }
775 781
776 /* enable the receiver */ 782 /* enable the receiver */
@@ -787,7 +793,7 @@ static void activate_receiver(struct xircom_private *card)
787 udelay(50); 793 udelay(50);
788 counter--; 794 counter--;
789 if (counter <= 0) 795 if (counter <= 0)
790 printk(KERN_ERR "xircom_cb: Receiver failed to re-activate\n"); 796 pr_err("Receiver failed to re-activate\n");
791 } 797 }
792 798
793 leave("activate_receiver"); 799 leave("activate_receiver");
@@ -818,7 +824,7 @@ static void deactivate_receiver(struct xircom_private *card)
818 udelay(50); 824 udelay(50);
819 counter--; 825 counter--;
820 if (counter <= 0) 826 if (counter <= 0)
821 printk(KERN_ERR "xircom_cb: Receiver failed to deactivate\n"); 827 pr_err("Receiver failed to deactivate\n");
822 } 828 }
823 829
824 830
@@ -861,7 +867,7 @@ static void activate_transmitter(struct xircom_private *card)
861 udelay(50); 867 udelay(50);
862 counter--; 868 counter--;
863 if (counter <= 0) 869 if (counter <= 0)
864 printk(KERN_ERR "xircom_cb: Transmitter failed to deactivate\n"); 870 pr_err("Transmitter failed to deactivate\n");
865 } 871 }
866 872
867 /* enable the transmitter */ 873 /* enable the transmitter */
@@ -878,7 +884,7 @@ static void activate_transmitter(struct xircom_private *card)
878 udelay(50); 884 udelay(50);
879 counter--; 885 counter--;
880 if (counter <= 0) 886 if (counter <= 0)
881 printk(KERN_ERR "xircom_cb: Transmitter failed to re-activate\n"); 887 pr_err("Transmitter failed to re-activate\n");
882 } 888 }
883 889
884 leave("activate_transmitter"); 890 leave("activate_transmitter");
@@ -909,7 +915,7 @@ static void deactivate_transmitter(struct xircom_private *card)
909 udelay(50); 915 udelay(50);
910 counter--; 916 counter--;
911 if (counter <= 0) 917 if (counter <= 0)
912 printk(KERN_ERR "xircom_cb: Transmitter failed to deactivate\n"); 918 pr_err("Transmitter failed to deactivate\n");
913 } 919 }
914 920
915 921
@@ -1184,7 +1190,7 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri
1184 struct sk_buff *skb; 1190 struct sk_buff *skb;
1185 1191
1186 if (pkt_len > 1518) { 1192 if (pkt_len > 1518) {
1187 printk(KERN_ERR "xircom_cb: Packet length %i is bogus \n",pkt_len); 1193 pr_err("Packet length %i is bogus\n", pkt_len);
1188 pkt_len = 1518; 1194 pkt_len = 1518;
1189 } 1195 }
1190 1196
@@ -1222,7 +1228,7 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p
1222 status = le32_to_cpu(card->tx_buffer[4*descnr]); 1228 status = le32_to_cpu(card->tx_buffer[4*descnr]);
1223#if 0 1229#if 0
1224 if (status & 0x8000) { /* Major error */ 1230 if (status & 0x8000) { /* Major error */
1225 printk(KERN_ERR "Major transmit error status %x \n", status); 1231 pr_err("Major transmit error status %x\n", status);
1226 card->tx_buffer[4*descnr] = 0; 1232 card->tx_buffer[4*descnr] = 0;
1227 netif_wake_queue (dev); 1233 netif_wake_queue (dev);
1228 } 1234 }
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 2834a01bae24..ce1efa4c0b0d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -61,6 +61,7 @@
61#include <linux/crc32.h> 61#include <linux/crc32.h>
62#include <linux/nsproxy.h> 62#include <linux/nsproxy.h>
63#include <linux/virtio_net.h> 63#include <linux/virtio_net.h>
64#include <linux/rcupdate.h>
64#include <net/net_namespace.h> 65#include <net/net_namespace.h>
65#include <net/netns/generic.h> 66#include <net/netns/generic.h>
66#include <net/rtnetlink.h> 67#include <net/rtnetlink.h>
@@ -144,6 +145,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
144 err = 0; 145 err = 0;
145 tfile->tun = tun; 146 tfile->tun = tun;
146 tun->tfile = tfile; 147 tun->tfile = tfile;
148 tun->socket.file = file;
147 dev_hold(tun->dev); 149 dev_hold(tun->dev);
148 sock_hold(tun->socket.sk); 150 sock_hold(tun->socket.sk);
149 atomic_inc(&tfile->count); 151 atomic_inc(&tfile->count);
@@ -158,6 +160,7 @@ static void __tun_detach(struct tun_struct *tun)
158 /* Detach from net device */ 160 /* Detach from net device */
159 netif_tx_lock_bh(tun->dev); 161 netif_tx_lock_bh(tun->dev);
160 tun->tfile = NULL; 162 tun->tfile = NULL;
163 tun->socket.file = NULL;
161 netif_tx_unlock_bh(tun->dev); 164 netif_tx_unlock_bh(tun->dev);
162 165
163 /* Drop read queue */ 166 /* Drop read queue */
@@ -364,6 +367,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
364 if (!check_filter(&tun->txflt, skb)) 367 if (!check_filter(&tun->txflt, skb))
365 goto drop; 368 goto drop;
366 369
370 if (tun->socket.sk->sk_filter &&
371 sk_filter(tun->socket.sk, skb))
372 goto drop;
373
367 if (skb_queue_len(&tun->socket.sk->sk_receive_queue) >= dev->tx_queue_len) { 374 if (skb_queue_len(&tun->socket.sk->sk_receive_queue) >= dev->tx_queue_len) {
368 if (!(tun->flags & TUN_ONE_QUEUE)) { 375 if (!(tun->flags & TUN_ONE_QUEUE)) {
369 /* Normal queueing mode. */ 376 /* Normal queueing mode. */
@@ -387,7 +394,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
387 /* Notify and wake up reader process */ 394 /* Notify and wake up reader process */
388 if (tun->flags & TUN_FASYNC) 395 if (tun->flags & TUN_FASYNC)
389 kill_fasync(&tun->fasync, SIGIO, POLL_IN); 396 kill_fasync(&tun->fasync, SIGIO, POLL_IN);
390 wake_up_interruptible(&tun->socket.wait); 397 wake_up_interruptible_poll(&tun->socket.wait, POLLIN |
398 POLLRDNORM | POLLRDBAND);
391 return NETDEV_TX_OK; 399 return NETDEV_TX_OK;
392 400
393drop: 401drop:
@@ -743,7 +751,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
743 len = min_t(int, skb->len, len); 751 len = min_t(int, skb->len, len);
744 752
745 skb_copy_datagram_const_iovec(skb, 0, iv, total, len); 753 skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
746 total += len; 754 total += skb->len;
747 755
748 tun->dev->stats.tx_packets++; 756 tun->dev->stats.tx_packets++;
749 tun->dev->stats.tx_bytes += len; 757 tun->dev->stats.tx_bytes += len;
@@ -751,34 +759,23 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
751 return total; 759 return total;
752} 760}
753 761
754static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, 762static ssize_t tun_do_read(struct tun_struct *tun,
755 unsigned long count, loff_t pos) 763 struct kiocb *iocb, const struct iovec *iv,
764 ssize_t len, int noblock)
756{ 765{
757 struct file *file = iocb->ki_filp;
758 struct tun_file *tfile = file->private_data;
759 struct tun_struct *tun = __tun_get(tfile);
760 DECLARE_WAITQUEUE(wait, current); 766 DECLARE_WAITQUEUE(wait, current);
761 struct sk_buff *skb; 767 struct sk_buff *skb;
762 ssize_t len, ret = 0; 768 ssize_t ret = 0;
763
764 if (!tun)
765 return -EBADFD;
766 769
767 DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name); 770 DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
768 771
769 len = iov_length(iv, count);
770 if (len < 0) {
771 ret = -EINVAL;
772 goto out;
773 }
774
775 add_wait_queue(&tun->socket.wait, &wait); 772 add_wait_queue(&tun->socket.wait, &wait);
776 while (len) { 773 while (len) {
777 current->state = TASK_INTERRUPTIBLE; 774 current->state = TASK_INTERRUPTIBLE;
778 775
779 /* Read frames from the queue */ 776 /* Read frames from the queue */
780 if (!(skb=skb_dequeue(&tun->socket.sk->sk_receive_queue))) { 777 if (!(skb=skb_dequeue(&tun->socket.sk->sk_receive_queue))) {
781 if (file->f_flags & O_NONBLOCK) { 778 if (noblock) {
782 ret = -EAGAIN; 779 ret = -EAGAIN;
783 break; 780 break;
784 } 781 }
@@ -805,6 +802,27 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
805 current->state = TASK_RUNNING; 802 current->state = TASK_RUNNING;
806 remove_wait_queue(&tun->socket.wait, &wait); 803 remove_wait_queue(&tun->socket.wait, &wait);
807 804
805 return ret;
806}
807
808static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
809 unsigned long count, loff_t pos)
810{
811 struct file *file = iocb->ki_filp;
812 struct tun_file *tfile = file->private_data;
813 struct tun_struct *tun = __tun_get(tfile);
814 ssize_t len, ret;
815
816 if (!tun)
817 return -EBADFD;
818 len = iov_length(iv, count);
819 if (len < 0) {
820 ret = -EINVAL;
821 goto out;
822 }
823
824 ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK);
825 ret = min_t(ssize_t, ret, len);
808out: 826out:
809 tun_put(tun); 827 tun_put(tun);
810 return ret; 828 return ret;
@@ -847,7 +865,8 @@ static void tun_sock_write_space(struct sock *sk)
847 return; 865 return;
848 866
849 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 867 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
850 wake_up_interruptible_sync(sk->sk_sleep); 868 wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
869 POLLWRNORM | POLLWRBAND);
851 870
852 tun = tun_sk(sk)->tun; 871 tun = tun_sk(sk)->tun;
853 kill_fasync(&tun->fasync, SIGIO, POLL_OUT); 872 kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
@@ -858,6 +877,37 @@ static void tun_sock_destruct(struct sock *sk)
858 free_netdev(tun_sk(sk)->tun->dev); 877 free_netdev(tun_sk(sk)->tun->dev);
859} 878}
860 879
880static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
881 struct msghdr *m, size_t total_len)
882{
883 struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
884 return tun_get_user(tun, m->msg_iov, total_len,
885 m->msg_flags & MSG_DONTWAIT);
886}
887
888static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
889 struct msghdr *m, size_t total_len,
890 int flags)
891{
892 struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
893 int ret;
894 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
895 return -EINVAL;
896 ret = tun_do_read(tun, iocb, m->msg_iov, total_len,
897 flags & MSG_DONTWAIT);
898 if (ret > total_len) {
899 m->msg_flags |= MSG_TRUNC;
900 ret = flags & MSG_TRUNC ? ret : total_len;
901 }
902 return ret;
903}
904
905/* Ops structure to mimic raw sockets with tun */
906static const struct proto_ops tun_socket_ops = {
907 .sendmsg = tun_sendmsg,
908 .recvmsg = tun_recvmsg,
909};
910
861static struct proto tun_proto = { 911static struct proto tun_proto = {
862 .name = "tun", 912 .name = "tun",
863 .owner = THIS_MODULE, 913 .owner = THIS_MODULE,
@@ -986,6 +1036,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
986 goto err_free_dev; 1036 goto err_free_dev;
987 1037
988 init_waitqueue_head(&tun->socket.wait); 1038 init_waitqueue_head(&tun->socket.wait);
1039 tun->socket.ops = &tun_socket_ops;
989 sock_init_data(&tun->socket, sk); 1040 sock_init_data(&tun->socket, sk);
990 sk->sk_write_space = tun_sock_write_space; 1041 sk->sk_write_space = tun_sock_write_space;
991 sk->sk_sndbuf = INT_MAX; 1042 sk->sk_sndbuf = INT_MAX;
@@ -1116,6 +1167,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1116 struct tun_file *tfile = file->private_data; 1167 struct tun_file *tfile = file->private_data;
1117 struct tun_struct *tun; 1168 struct tun_struct *tun;
1118 void __user* argp = (void __user*)arg; 1169 void __user* argp = (void __user*)arg;
1170 struct sock_fprog fprog;
1119 struct ifreq ifr; 1171 struct ifreq ifr;
1120 int sndbuf; 1172 int sndbuf;
1121 int ret; 1173 int ret;
@@ -1263,6 +1315,26 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1263 tun->socket.sk->sk_sndbuf = sndbuf; 1315 tun->socket.sk->sk_sndbuf = sndbuf;
1264 break; 1316 break;
1265 1317
1318 case TUNATTACHFILTER:
1319 /* Can be set only for TAPs */
1320 ret = -EINVAL;
1321 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
1322 break;
1323 ret = -EFAULT;
1324 if (copy_from_user(&fprog, argp, sizeof(fprog)))
1325 break;
1326
1327 ret = sk_attach_filter(&fprog, tun->socket.sk);
1328 break;
1329
1330 case TUNDETACHFILTER:
1331 /* Can be set only for TAPs */
1332 ret = -EINVAL;
1333 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
1334 break;
1335 ret = sk_detach_filter(tun->socket.sk);
1336 break;
1337
1266 default: 1338 default:
1267 ret = -EINVAL; 1339 ret = -EINVAL;
1268 break; 1340 break;
@@ -1525,6 +1597,23 @@ static void tun_cleanup(void)
1525 rtnl_link_unregister(&tun_link_ops); 1597 rtnl_link_unregister(&tun_link_ops);
1526} 1598}
1527 1599
1600/* Get an underlying socket object from tun file. Returns error unless file is
1601 * attached to a device. The returned object works like a packet socket, it
1602 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
1603 * holding a reference to the file for as long as the socket is in use. */
1604struct socket *tun_get_socket(struct file *file)
1605{
1606 struct tun_struct *tun;
1607 if (file->f_op != &tun_fops)
1608 return ERR_PTR(-EINVAL);
1609 tun = tun_get(file);
1610 if (!tun)
1611 return ERR_PTR(-EBADFD);
1612 tun_put(tun);
1613 return &tun->socket;
1614}
1615EXPORT_SYMBOL_GPL(tun_get_socket);
1616
1528module_init(tun_init); 1617module_init(tun_init);
1529module_exit(tun_cleanup); 1618module_exit(tun_cleanup);
1530MODULE_DESCRIPTION(DRV_DESCRIPTION); 1619MODULE_DESCRIPTION(DRV_DESCRIPTION);
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 39f1fc650be6..e3ddcb8f29df 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -98,14 +98,10 @@ static const int multicast_filter_limit = 32;
98#define TX_TIMEOUT (2*HZ) 98#define TX_TIMEOUT (2*HZ)
99 99
100#define PKT_BUF_SZ 1536 100#define PKT_BUF_SZ 1536
101
102#define DRV_MODULE_NAME "typhoon"
103#define DRV_MODULE_VERSION "1.5.9"
104#define DRV_MODULE_RELDATE "Mar 2, 2009"
105#define PFX DRV_MODULE_NAME ": "
106#define ERR_PFX KERN_ERR PFX
107#define FIRMWARE_NAME "3com/typhoon.bin" 101#define FIRMWARE_NAME "3com/typhoon.bin"
108 102
103#define pr_fmt(fmt) KBUILD_MODNAME " " fmt
104
109#include <linux/module.h> 105#include <linux/module.h>
110#include <linux/kernel.h> 106#include <linux/kernel.h>
111#include <linux/sched.h> 107#include <linux/sched.h>
@@ -132,14 +128,12 @@ static const int multicast_filter_limit = 32;
132#include <linux/in6.h> 128#include <linux/in6.h>
133#include <linux/dma-mapping.h> 129#include <linux/dma-mapping.h>
134#include <linux/firmware.h> 130#include <linux/firmware.h>
131#include <generated/utsrelease.h>
135 132
136#include "typhoon.h" 133#include "typhoon.h"
137 134
138static char version[] __devinitdata =
139 "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
140
141MODULE_AUTHOR("David Dillow <dave@thedillows.org>"); 135MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
142MODULE_VERSION(DRV_MODULE_VERSION); 136MODULE_VERSION(UTS_RELEASE);
143MODULE_LICENSE("GPL"); 137MODULE_LICENSE("GPL");
144MODULE_FIRMWARE(FIRMWARE_NAME); 138MODULE_FIRMWARE(FIRMWARE_NAME);
145MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)"); 139MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
@@ -161,8 +155,8 @@ module_param(use_mmio, int, 0);
161#endif 155#endif
162 156
163struct typhoon_card_info { 157struct typhoon_card_info {
164 char *name; 158 const char *name;
165 int capabilities; 159 const int capabilities;
166}; 160};
167 161
168#define TYPHOON_CRYPTO_NONE 0x00 162#define TYPHOON_CRYPTO_NONE 0x00
@@ -215,7 +209,7 @@ static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
215 * bit 8 indicates if this is a (0) copper or (1) fiber card 209 * bit 8 indicates if this is a (0) copper or (1) fiber card
216 * bits 12-16 indicate card type: (0) client and (1) server 210 * bits 12-16 indicate card type: (0) client and (1) server
217 */ 211 */
218static struct pci_device_id typhoon_pci_tbl[] = { 212static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
219 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990, 213 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
220 PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX }, 214 PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
221 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95, 215 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
@@ -299,7 +293,6 @@ struct typhoon {
299 struct basic_ring respRing; 293 struct basic_ring respRing;
300 struct net_device_stats stats; 294 struct net_device_stats stats;
301 struct net_device_stats stats_saved; 295 struct net_device_stats stats_saved;
302 const char * name;
303 struct typhoon_shared * shared; 296 struct typhoon_shared * shared;
304 dma_addr_t shared_dma; 297 dma_addr_t shared_dma;
305 __le16 xcvr_select; 298 __le16 xcvr_select;
@@ -534,13 +527,13 @@ typhoon_process_response(struct typhoon *tp, int resp_size,
534 } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) { 527 } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
535 typhoon_hello(tp); 528 typhoon_hello(tp);
536 } else { 529 } else {
537 printk(KERN_ERR "%s: dumping unexpected response " 530 netdev_err(tp->dev,
538 "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n", 531 "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
539 tp->name, le16_to_cpu(resp->cmd), 532 le16_to_cpu(resp->cmd),
540 resp->numDesc, resp->flags, 533 resp->numDesc, resp->flags,
541 le16_to_cpu(resp->parm1), 534 le16_to_cpu(resp->parm1),
542 le32_to_cpu(resp->parm2), 535 le32_to_cpu(resp->parm2),
543 le32_to_cpu(resp->parm3)); 536 le32_to_cpu(resp->parm3));
544 } 537 }
545 538
546cleanup: 539cleanup:
@@ -606,9 +599,8 @@ typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
606 freeResp = typhoon_num_free_resp(tp); 599 freeResp = typhoon_num_free_resp(tp);
607 600
608 if(freeCmd < num_cmd || freeResp < num_resp) { 601 if(freeCmd < num_cmd || freeResp < num_resp) {
609 printk("%s: no descs for cmd, had (needed) %d (%d) cmd, " 602 netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
610 "%d (%d) resp\n", tp->name, freeCmd, num_cmd, 603 freeCmd, num_cmd, freeResp, num_resp);
611 freeResp, num_resp);
612 err = -ENOMEM; 604 err = -ENOMEM;
613 goto out; 605 goto out;
614 } 606 }
@@ -733,7 +725,7 @@ typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
733 spin_unlock_bh(&tp->state_lock); 725 spin_unlock_bh(&tp->state_lock);
734 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); 726 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
735 if(err < 0) 727 if(err < 0)
736 printk("%s: vlan offload error %d\n", tp->name, -err); 728 netdev_err(tp->dev, "vlan offload error %d\n", -err);
737 spin_lock_bh(&tp->state_lock); 729 spin_lock_bh(&tp->state_lock);
738 } 730 }
739 731
@@ -924,17 +916,15 @@ typhoon_set_rx_mode(struct net_device *dev)
924 filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST; 916 filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
925 if(dev->flags & IFF_PROMISC) { 917 if(dev->flags & IFF_PROMISC) {
926 filter |= TYPHOON_RX_FILTER_PROMISCOUS; 918 filter |= TYPHOON_RX_FILTER_PROMISCOUS;
927 } else if((dev->mc_count > multicast_filter_limit) || 919 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
928 (dev->flags & IFF_ALLMULTI)) { 920 (dev->flags & IFF_ALLMULTI)) {
929 /* Too many to match, or accept all multicasts. */ 921 /* Too many to match, or accept all multicasts. */
930 filter |= TYPHOON_RX_FILTER_ALL_MCAST; 922 filter |= TYPHOON_RX_FILTER_ALL_MCAST;
931 } else if(dev->mc_count) { 923 } else if (!netdev_mc_empty(dev)) {
932 struct dev_mc_list *mclist; 924 struct dev_mc_list *mclist;
933 int i;
934 925
935 memset(mc_filter, 0, sizeof(mc_filter)); 926 memset(mc_filter, 0, sizeof(mc_filter));
936 for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 927 netdev_for_each_mc_addr(mclist, dev) {
937 i++, mclist = mclist->next) {
938 int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f; 928 int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
939 mc_filter[bit >> 5] |= 1 << (bit & 0x1f); 929 mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
940 } 930 }
@@ -1020,7 +1010,7 @@ typhoon_get_stats(struct net_device *dev)
1020 return saved; 1010 return saved;
1021 1011
1022 if(typhoon_do_get_stats(tp) < 0) { 1012 if(typhoon_do_get_stats(tp) < 0) {
1023 printk(KERN_ERR "%s: error getting stats\n", dev->name); 1013 netdev_err(dev, "error getting stats\n");
1024 return saved; 1014 return saved;
1025 } 1015 }
1026 1016
@@ -1062,8 +1052,8 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1062 } 1052 }
1063 } 1053 }
1064 1054
1065 strcpy(info->driver, DRV_MODULE_NAME); 1055 strcpy(info->driver, KBUILD_MODNAME);
1066 strcpy(info->version, DRV_MODULE_VERSION); 1056 strcpy(info->version, UTS_RELEASE);
1067 strcpy(info->bus_info, pci_name(pci_dev)); 1057 strcpy(info->bus_info, pci_name(pci_dev));
1068} 1058}
1069 1059
@@ -1365,8 +1355,8 @@ typhoon_request_firmware(struct typhoon *tp)
1365 1355
1366 err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev); 1356 err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1367 if (err) { 1357 if (err) {
1368 printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n", 1358 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
1369 tp->name, FIRMWARE_NAME); 1359 FIRMWARE_NAME);
1370 return err; 1360 return err;
1371 } 1361 }
1372 1362
@@ -1401,7 +1391,7 @@ typhoon_request_firmware(struct typhoon *tp)
1401 return 0; 1391 return 0;
1402 1392
1403invalid_fw: 1393invalid_fw:
1404 printk(KERN_ERR "%s: Invalid firmware image\n", tp->name); 1394 netdev_err(tp->dev, "Invalid firmware image\n");
1405 release_firmware(typhoon_fw); 1395 release_firmware(typhoon_fw);
1406 typhoon_fw = NULL; 1396 typhoon_fw = NULL;
1407 return -EINVAL; 1397 return -EINVAL;
@@ -1438,7 +1428,7 @@ typhoon_download_firmware(struct typhoon *tp)
1438 err = -ENOMEM; 1428 err = -ENOMEM;
1439 dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma); 1429 dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
1440 if(!dpage) { 1430 if(!dpage) {
1441 printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name); 1431 netdev_err(tp->dev, "no DMA mem for firmware\n");
1442 goto err_out; 1432 goto err_out;
1443 } 1433 }
1444 1434
@@ -1451,7 +1441,7 @@ typhoon_download_firmware(struct typhoon *tp)
1451 1441
1452 err = -ETIMEDOUT; 1442 err = -ETIMEDOUT;
1453 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { 1443 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
1454 printk(KERN_ERR "%s: card ready timeout\n", tp->name); 1444 netdev_err(tp->dev, "card ready timeout\n");
1455 goto err_out_irq; 1445 goto err_out_irq;
1456 } 1446 }
1457 1447
@@ -1491,8 +1481,7 @@ typhoon_download_firmware(struct typhoon *tp)
1491 if(typhoon_wait_interrupt(ioaddr) < 0 || 1481 if(typhoon_wait_interrupt(ioaddr) < 0 ||
1492 ioread32(ioaddr + TYPHOON_REG_STATUS) != 1482 ioread32(ioaddr + TYPHOON_REG_STATUS) !=
1493 TYPHOON_STATUS_WAITING_FOR_SEGMENT) { 1483 TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
1494 printk(KERN_ERR "%s: segment ready timeout\n", 1484 netdev_err(tp->dev, "segment ready timeout\n");
1495 tp->name);
1496 goto err_out_irq; 1485 goto err_out_irq;
1497 } 1486 }
1498 1487
@@ -1502,8 +1491,8 @@ typhoon_download_firmware(struct typhoon *tp)
1502 * the checksum, we can do this once, at the end. 1491 * the checksum, we can do this once, at the end.
1503 */ 1492 */
1504 csum = csum_fold(csum_partial_copy_nocheck(image_data, 1493 csum = csum_fold(csum_partial_copy_nocheck(image_data,
1505 dpage, len, 1494 dpage, len,
1506 0)); 1495 0));
1507 1496
1508 iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH); 1497 iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
1509 iowrite32(le16_to_cpu((__force __le16)csum), 1498 iowrite32(le16_to_cpu((__force __le16)csum),
@@ -1514,7 +1503,7 @@ typhoon_download_firmware(struct typhoon *tp)
1514 iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO); 1503 iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
1515 typhoon_post_pci_writes(ioaddr); 1504 typhoon_post_pci_writes(ioaddr);
1516 iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE, 1505 iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
1517 ioaddr + TYPHOON_REG_COMMAND); 1506 ioaddr + TYPHOON_REG_COMMAND);
1518 1507
1519 image_data += len; 1508 image_data += len;
1520 load_addr += len; 1509 load_addr += len;
@@ -1525,15 +1514,15 @@ typhoon_download_firmware(struct typhoon *tp)
1525 if(typhoon_wait_interrupt(ioaddr) < 0 || 1514 if(typhoon_wait_interrupt(ioaddr) < 0 ||
1526 ioread32(ioaddr + TYPHOON_REG_STATUS) != 1515 ioread32(ioaddr + TYPHOON_REG_STATUS) !=
1527 TYPHOON_STATUS_WAITING_FOR_SEGMENT) { 1516 TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
1528 printk(KERN_ERR "%s: final segment ready timeout\n", tp->name); 1517 netdev_err(tp->dev, "final segment ready timeout\n");
1529 goto err_out_irq; 1518 goto err_out_irq;
1530 } 1519 }
1531 1520
1532 iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND); 1521 iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);
1533 1522
1534 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) { 1523 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1535 printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n", 1524 netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
1536 tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS)); 1525 ioread32(ioaddr + TYPHOON_REG_STATUS));
1537 goto err_out_irq; 1526 goto err_out_irq;
1538 } 1527 }
1539 1528
@@ -1555,7 +1544,7 @@ typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
1555 void __iomem *ioaddr = tp->ioaddr; 1544 void __iomem *ioaddr = tp->ioaddr;
1556 1545
1557 if(typhoon_wait_status(ioaddr, initial_status) < 0) { 1546 if(typhoon_wait_status(ioaddr, initial_status) < 0) {
1558 printk(KERN_ERR "%s: boot ready timeout\n", tp->name); 1547 netdev_err(tp->dev, "boot ready timeout\n");
1559 goto out_timeout; 1548 goto out_timeout;
1560 } 1549 }
1561 1550
@@ -1566,8 +1555,8 @@ typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
1566 ioaddr + TYPHOON_REG_COMMAND); 1555 ioaddr + TYPHOON_REG_COMMAND);
1567 1556
1568 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) { 1557 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
1569 printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n", 1558 netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
1570 tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS)); 1559 ioread32(ioaddr + TYPHOON_REG_STATUS));
1571 goto out_timeout; 1560 goto out_timeout;
1572 } 1561 }
1573 1562
@@ -1866,8 +1855,7 @@ typhoon_interrupt(int irq, void *dev_instance)
1866 typhoon_post_pci_writes(ioaddr); 1855 typhoon_post_pci_writes(ioaddr);
1867 __napi_schedule(&tp->napi); 1856 __napi_schedule(&tp->napi);
1868 } else { 1857 } else {
1869 printk(KERN_ERR "%s: Error, poll already scheduled\n", 1858 netdev_err(dev, "Error, poll already scheduled\n");
1870 dev->name);
1871 } 1859 }
1872 return IRQ_HANDLED; 1860 return IRQ_HANDLED;
1873} 1861}
@@ -1900,16 +1888,15 @@ typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
1900 xp_cmd.parm1 = events; 1888 xp_cmd.parm1 = events;
1901 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); 1889 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1902 if(err < 0) { 1890 if(err < 0) {
1903 printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n", 1891 netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
1904 tp->name, err); 1892 err);
1905 return err; 1893 return err;
1906 } 1894 }
1907 1895
1908 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP); 1896 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
1909 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); 1897 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1910 if(err < 0) { 1898 if(err < 0) {
1911 printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n", 1899 netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
1912 tp->name, err);
1913 return err; 1900 return err;
1914 } 1901 }
1915 1902
@@ -1960,12 +1947,12 @@ typhoon_start_runtime(struct typhoon *tp)
1960 1947
1961 err = typhoon_download_firmware(tp); 1948 err = typhoon_download_firmware(tp);
1962 if(err < 0) { 1949 if(err < 0) {
1963 printk("%s: cannot load runtime on 3XP\n", tp->name); 1950 netdev_err(tp->dev, "cannot load runtime on 3XP\n");
1964 goto error_out; 1951 goto error_out;
1965 } 1952 }
1966 1953
1967 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) { 1954 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1968 printk("%s: cannot boot 3XP\n", tp->name); 1955 netdev_err(tp->dev, "cannot boot 3XP\n");
1969 err = -EIO; 1956 err = -EIO;
1970 goto error_out; 1957 goto error_out;
1971 } 1958 }
@@ -2069,9 +2056,7 @@ typhoon_stop_runtime(struct typhoon *tp, int wait_type)
2069 } 2056 }
2070 2057
2071 if(i == TYPHOON_WAIT_TIMEOUT) 2058 if(i == TYPHOON_WAIT_TIMEOUT)
2072 printk(KERN_ERR 2059 netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");
2073 "%s: halt timed out waiting for Tx to complete\n",
2074 tp->name);
2075 2060
2076 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE); 2061 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
2077 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); 2062 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
@@ -2088,11 +2073,10 @@ typhoon_stop_runtime(struct typhoon *tp, int wait_type)
2088 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); 2073 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2089 2074
2090 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0) 2075 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
2091 printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n", 2076 netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");
2092 tp->name);
2093 2077
2094 if(typhoon_reset(ioaddr, wait_type) < 0) { 2078 if(typhoon_reset(ioaddr, wait_type) < 0) {
2095 printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name); 2079 netdev_err(tp->dev, "unable to reset 3XP\n");
2096 return -ETIMEDOUT; 2080 return -ETIMEDOUT;
2097 } 2081 }
2098 2082
@@ -2111,8 +2095,7 @@ typhoon_tx_timeout(struct net_device *dev)
2111 struct typhoon *tp = netdev_priv(dev); 2095 struct typhoon *tp = netdev_priv(dev);
2112 2096
2113 if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) { 2097 if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2114 printk(KERN_WARNING "%s: could not reset in tx timeout\n", 2098 netdev_warn(dev, "could not reset in tx timeout\n");
2115 dev->name);
2116 goto truely_dead; 2099 goto truely_dead;
2117 } 2100 }
2118 2101
@@ -2121,8 +2104,7 @@ typhoon_tx_timeout(struct net_device *dev)
2121 typhoon_free_rx_rings(tp); 2104 typhoon_free_rx_rings(tp);
2122 2105
2123 if(typhoon_start_runtime(tp) < 0) { 2106 if(typhoon_start_runtime(tp) < 0) {
2124 printk(KERN_ERR "%s: could not start runtime in tx timeout\n", 2107 netdev_err(dev, "could not start runtime in tx timeout\n");
2125 dev->name);
2126 goto truely_dead; 2108 goto truely_dead;
2127 } 2109 }
2128 2110
@@ -2147,7 +2129,7 @@ typhoon_open(struct net_device *dev)
2147 2129
2148 err = typhoon_wakeup(tp, WaitSleep); 2130 err = typhoon_wakeup(tp, WaitSleep);
2149 if(err < 0) { 2131 if(err < 0) {
2150 printk(KERN_ERR "%s: unable to wakeup device\n", dev->name); 2132 netdev_err(dev, "unable to wakeup device\n");
2151 goto out_sleep; 2133 goto out_sleep;
2152 } 2134 }
2153 2135
@@ -2172,14 +2154,13 @@ out_irq:
2172 2154
2173out_sleep: 2155out_sleep:
2174 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { 2156 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2175 printk(KERN_ERR "%s: unable to reboot into sleep img\n", 2157 netdev_err(dev, "unable to reboot into sleep img\n");
2176 dev->name);
2177 typhoon_reset(tp->ioaddr, NoWait); 2158 typhoon_reset(tp->ioaddr, NoWait);
2178 goto out; 2159 goto out;
2179 } 2160 }
2180 2161
2181 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) 2162 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
2182 printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name); 2163 netdev_err(dev, "unable to go back to sleep\n");
2183 2164
2184out: 2165out:
2185 return err; 2166 return err;
@@ -2194,7 +2175,7 @@ typhoon_close(struct net_device *dev)
2194 napi_disable(&tp->napi); 2175 napi_disable(&tp->napi);
2195 2176
2196 if(typhoon_stop_runtime(tp, WaitSleep) < 0) 2177 if(typhoon_stop_runtime(tp, WaitSleep) < 0)
2197 printk(KERN_ERR "%s: unable to stop runtime\n", dev->name); 2178 netdev_err(dev, "unable to stop runtime\n");
2198 2179
2199 /* Make sure there is no irq handler running on a different CPU. */ 2180 /* Make sure there is no irq handler running on a different CPU. */
2200 free_irq(dev->irq, dev); 2181 free_irq(dev->irq, dev);
@@ -2203,10 +2184,10 @@ typhoon_close(struct net_device *dev)
2203 typhoon_init_rings(tp); 2184 typhoon_init_rings(tp);
2204 2185
2205 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) 2186 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
2206 printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name); 2187 netdev_err(dev, "unable to boot sleep image\n");
2207 2188
2208 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) 2189 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
2209 printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name); 2190 netdev_err(dev, "unable to put card to sleep\n");
2210 2191
2211 return 0; 2192 return 0;
2212} 2193}
@@ -2224,14 +2205,12 @@ typhoon_resume(struct pci_dev *pdev)
2224 return 0; 2205 return 0;
2225 2206
2226 if(typhoon_wakeup(tp, WaitNoSleep) < 0) { 2207 if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
2227 printk(KERN_ERR "%s: critical: could not wake up in resume\n", 2208 netdev_err(dev, "critical: could not wake up in resume\n");
2228 dev->name);
2229 goto reset; 2209 goto reset;
2230 } 2210 }
2231 2211
2232 if(typhoon_start_runtime(tp) < 0) { 2212 if(typhoon_start_runtime(tp) < 0) {
2233 printk(KERN_ERR "%s: critical: could not start runtime in " 2213 netdev_err(dev, "critical: could not start runtime in resume\n");
2234 "resume\n", dev->name);
2235 goto reset; 2214 goto reset;
2236 } 2215 }
2237 2216
@@ -2258,8 +2237,7 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
2258 spin_lock_bh(&tp->state_lock); 2237 spin_lock_bh(&tp->state_lock);
2259 if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) { 2238 if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
2260 spin_unlock_bh(&tp->state_lock); 2239 spin_unlock_bh(&tp->state_lock);
2261 printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n", 2240 netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
2262 dev->name);
2263 return -EBUSY; 2241 return -EBUSY;
2264 } 2242 }
2265 spin_unlock_bh(&tp->state_lock); 2243 spin_unlock_bh(&tp->state_lock);
@@ -2267,7 +2245,7 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
2267 netif_device_detach(dev); 2245 netif_device_detach(dev);
2268 2246
2269 if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) { 2247 if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
2270 printk(KERN_ERR "%s: unable to stop runtime\n", dev->name); 2248 netdev_err(dev, "unable to stop runtime\n");
2271 goto need_resume; 2249 goto need_resume;
2272 } 2250 }
2273 2251
@@ -2275,7 +2253,7 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
2275 typhoon_init_rings(tp); 2253 typhoon_init_rings(tp);
2276 2254
2277 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { 2255 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2278 printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name); 2256 netdev_err(dev, "unable to boot sleep image\n");
2279 goto need_resume; 2257 goto need_resume;
2280 } 2258 }
2281 2259
@@ -2283,21 +2261,19 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
2283 xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0])); 2261 xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
2284 xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2])); 2262 xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
2285 if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) { 2263 if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
2286 printk(KERN_ERR "%s: unable to set mac address in suspend\n", 2264 netdev_err(dev, "unable to set mac address in suspend\n");
2287 dev->name);
2288 goto need_resume; 2265 goto need_resume;
2289 } 2266 }
2290 2267
2291 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER); 2268 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
2292 xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST; 2269 xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
2293 if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) { 2270 if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
2294 printk(KERN_ERR "%s: unable to set rx filter in suspend\n", 2271 netdev_err(dev, "unable to set rx filter in suspend\n");
2295 dev->name);
2296 goto need_resume; 2272 goto need_resume;
2297 } 2273 }
2298 2274
2299 if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) { 2275 if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
2300 printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name); 2276 netdev_err(dev, "unable to put card to sleep\n");
2301 goto need_resume; 2277 goto need_resume;
2302 } 2278 }
2303 2279
@@ -2351,7 +2327,7 @@ out_unmap:
2351 2327
2352out: 2328out:
2353 if(!mode) 2329 if(!mode)
2354 printk(KERN_INFO PFX "falling back to port IO\n"); 2330 pr_info("%s: falling back to port IO\n", pci_name(pdev));
2355 return mode; 2331 return mode;
2356} 2332}
2357 2333
@@ -2371,7 +2347,6 @@ static const struct net_device_ops typhoon_netdev_ops = {
2371static int __devinit 2347static int __devinit
2372typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 2348typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2373{ 2349{
2374 static int did_version = 0;
2375 struct net_device *dev; 2350 struct net_device *dev;
2376 struct typhoon *tp; 2351 struct typhoon *tp;
2377 int card_id = (int) ent->driver_data; 2352 int card_id = (int) ent->driver_data;
@@ -2381,14 +2356,11 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2381 struct cmd_desc xp_cmd; 2356 struct cmd_desc xp_cmd;
2382 struct resp_desc xp_resp[3]; 2357 struct resp_desc xp_resp[3];
2383 int err = 0; 2358 int err = 0;
2384 2359 const char *err_msg;
2385 if(!did_version++)
2386 printk(KERN_INFO "%s", version);
2387 2360
2388 dev = alloc_etherdev(sizeof(*tp)); 2361 dev = alloc_etherdev(sizeof(*tp));
2389 if(dev == NULL) { 2362 if(dev == NULL) {
2390 printk(ERR_PFX "%s: unable to alloc new net device\n", 2363 err_msg = "unable to alloc new net device";
2391 pci_name(pdev));
2392 err = -ENOMEM; 2364 err = -ENOMEM;
2393 goto error_out; 2365 goto error_out;
2394 } 2366 }
@@ -2396,57 +2368,48 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2396 2368
2397 err = pci_enable_device(pdev); 2369 err = pci_enable_device(pdev);
2398 if(err < 0) { 2370 if(err < 0) {
2399 printk(ERR_PFX "%s: unable to enable device\n", 2371 err_msg = "unable to enable device";
2400 pci_name(pdev));
2401 goto error_out_dev; 2372 goto error_out_dev;
2402 } 2373 }
2403 2374
2404 err = pci_set_mwi(pdev); 2375 err = pci_set_mwi(pdev);
2405 if(err < 0) { 2376 if(err < 0) {
2406 printk(ERR_PFX "%s: unable to set MWI\n", pci_name(pdev)); 2377 err_msg = "unable to set MWI";
2407 goto error_out_disable; 2378 goto error_out_disable;
2408 } 2379 }
2409 2380
2410 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2381 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2411 if(err < 0) { 2382 if(err < 0) {
2412 printk(ERR_PFX "%s: No usable DMA configuration\n", 2383 err_msg = "No usable DMA configuration";
2413 pci_name(pdev));
2414 goto error_out_mwi; 2384 goto error_out_mwi;
2415 } 2385 }
2416 2386
2417 /* sanity checks on IO and MMIO BARs 2387 /* sanity checks on IO and MMIO BARs
2418 */ 2388 */
2419 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) { 2389 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2420 printk(ERR_PFX 2390 err_msg = "region #1 not a PCI IO resource, aborting";
2421 "%s: region #1 not a PCI IO resource, aborting\n",
2422 pci_name(pdev));
2423 err = -ENODEV; 2391 err = -ENODEV;
2424 goto error_out_mwi; 2392 goto error_out_mwi;
2425 } 2393 }
2426 if(pci_resource_len(pdev, 0) < 128) { 2394 if(pci_resource_len(pdev, 0) < 128) {
2427 printk(ERR_PFX "%s: Invalid PCI IO region size, aborting\n", 2395 err_msg = "Invalid PCI IO region size, aborting";
2428 pci_name(pdev));
2429 err = -ENODEV; 2396 err = -ENODEV;
2430 goto error_out_mwi; 2397 goto error_out_mwi;
2431 } 2398 }
2432 if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { 2399 if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2433 printk(ERR_PFX 2400 err_msg = "region #1 not a PCI MMIO resource, aborting";
2434 "%s: region #1 not a PCI MMIO resource, aborting\n",
2435 pci_name(pdev));
2436 err = -ENODEV; 2401 err = -ENODEV;
2437 goto error_out_mwi; 2402 goto error_out_mwi;
2438 } 2403 }
2439 if(pci_resource_len(pdev, 1) < 128) { 2404 if(pci_resource_len(pdev, 1) < 128) {
2440 printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n", 2405 err_msg = "Invalid PCI MMIO region size, aborting";
2441 pci_name(pdev));
2442 err = -ENODEV; 2406 err = -ENODEV;
2443 goto error_out_mwi; 2407 goto error_out_mwi;
2444 } 2408 }
2445 2409
2446 err = pci_request_regions(pdev, "typhoon"); 2410 err = pci_request_regions(pdev, KBUILD_MODNAME);
2447 if(err < 0) { 2411 if(err < 0) {
2448 printk(ERR_PFX "%s: could not request regions\n", 2412 err_msg = "could not request regions";
2449 pci_name(pdev));
2450 goto error_out_mwi; 2413 goto error_out_mwi;
2451 } 2414 }
2452 2415
@@ -2457,8 +2420,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2457 2420
2458 ioaddr = pci_iomap(pdev, use_mmio, 128); 2421 ioaddr = pci_iomap(pdev, use_mmio, 128);
2459 if (!ioaddr) { 2422 if (!ioaddr) {
2460 printk(ERR_PFX "%s: cannot remap registers, aborting\n", 2423 err_msg = "cannot remap registers, aborting";
2461 pci_name(pdev));
2462 err = -EIO; 2424 err = -EIO;
2463 goto error_out_regions; 2425 goto error_out_regions;
2464 } 2426 }
@@ -2468,8 +2430,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2468 shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared), 2430 shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2469 &shared_dma); 2431 &shared_dma);
2470 if(!shared) { 2432 if(!shared) {
2471 printk(ERR_PFX "%s: could not allocate DMA memory\n", 2433 err_msg = "could not allocate DMA memory";
2472 pci_name(pdev));
2473 err = -ENOMEM; 2434 err = -ENOMEM;
2474 goto error_out_remap; 2435 goto error_out_remap;
2475 } 2436 }
@@ -2492,7 +2453,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2492 * 5) Put the card to sleep. 2453 * 5) Put the card to sleep.
2493 */ 2454 */
2494 if (typhoon_reset(ioaddr, WaitSleep) < 0) { 2455 if (typhoon_reset(ioaddr, WaitSleep) < 0) {
2495 printk(ERR_PFX "%s: could not reset 3XP\n", pci_name(pdev)); 2456 err_msg = "could not reset 3XP";
2496 err = -EIO; 2457 err = -EIO;
2497 goto error_out_dma; 2458 goto error_out_dma;
2498 } 2459 }
@@ -2504,26 +2465,18 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2504 pci_set_master(pdev); 2465 pci_set_master(pdev);
2505 pci_save_state(pdev); 2466 pci_save_state(pdev);
2506 2467
2507 /* dev->name is not valid until we register, but we need to
2508 * use some common routines to initialize the card. So that those
2509 * routines print the right name, we keep our oun pointer to the name
2510 */
2511 tp->name = pci_name(pdev);
2512
2513 typhoon_init_interface(tp); 2468 typhoon_init_interface(tp);
2514 typhoon_init_rings(tp); 2469 typhoon_init_rings(tp);
2515 2470
2516 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { 2471 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2517 printk(ERR_PFX "%s: cannot boot 3XP sleep image\n", 2472 err_msg = "cannot boot 3XP sleep image";
2518 pci_name(pdev));
2519 err = -EIO; 2473 err = -EIO;
2520 goto error_out_reset; 2474 goto error_out_reset;
2521 } 2475 }
2522 2476
2523 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS); 2477 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2524 if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) { 2478 if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2525 printk(ERR_PFX "%s: cannot read MAC address\n", 2479 err_msg = "cannot read MAC address";
2526 pci_name(pdev));
2527 err = -EIO; 2480 err = -EIO;
2528 goto error_out_reset; 2481 goto error_out_reset;
2529 } 2482 }
@@ -2532,8 +2485,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2532 *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2)); 2485 *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2533 2486
2534 if(!is_valid_ether_addr(dev->dev_addr)) { 2487 if(!is_valid_ether_addr(dev->dev_addr)) {
2535 printk(ERR_PFX "%s: Could not obtain valid ethernet address, " 2488 err_msg = "Could not obtain valid ethernet address, aborting";
2536 "aborting\n", pci_name(pdev));
2537 goto error_out_reset; 2489 goto error_out_reset;
2538 } 2490 }
2539 2491
@@ -2542,8 +2494,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2542 */ 2494 */
2543 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS); 2495 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2544 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) { 2496 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2545 printk(ERR_PFX "%s: Could not get Sleep Image version\n", 2497 err_msg = "Could not get Sleep Image version";
2546 pci_name(pdev));
2547 goto error_out_reset; 2498 goto error_out_reset;
2548 } 2499 }
2549 2500
@@ -2560,8 +2511,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2560 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET; 2511 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2561 2512
2562 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) { 2513 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
2563 printk(ERR_PFX "%s: cannot put adapter to sleep\n", 2514 err_msg = "cannot put adapter to sleep";
2564 pci_name(pdev));
2565 err = -EIO; 2515 err = -EIO;
2566 goto error_out_reset; 2516 goto error_out_reset;
2567 } 2517 }
@@ -2580,19 +2530,18 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2580 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 2530 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2581 dev->features |= NETIF_F_TSO; 2531 dev->features |= NETIF_F_TSO;
2582 2532
2583 if(register_netdev(dev) < 0) 2533 if(register_netdev(dev) < 0) {
2534 err_msg = "unable to register netdev";
2584 goto error_out_reset; 2535 goto error_out_reset;
2585 2536 }
2586 /* fixup our local name */
2587 tp->name = dev->name;
2588 2537
2589 pci_set_drvdata(pdev, dev); 2538 pci_set_drvdata(pdev, dev);
2590 2539
2591 printk(KERN_INFO "%s: %s at %s 0x%llx, %pM\n", 2540 netdev_info(dev, "%s at %s 0x%llx, %pM\n",
2592 dev->name, typhoon_card_info[card_id].name, 2541 typhoon_card_info[card_id].name,
2593 use_mmio ? "MMIO" : "IO", 2542 use_mmio ? "MMIO" : "IO",
2594 (unsigned long long)pci_resource_start(pdev, use_mmio), 2543 (unsigned long long)pci_resource_start(pdev, use_mmio),
2595 dev->dev_addr); 2544 dev->dev_addr);
2596 2545
2597 /* xp_resp still contains the response to the READ_VERSIONS command. 2546 /* xp_resp still contains the response to the READ_VERSIONS command.
2598 * For debugging, let the user know what version he has. 2547 * For debugging, let the user know what version he has.
@@ -2602,23 +2551,20 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2602 * of version is Month/Day of build. 2551 * of version is Month/Day of build.
2603 */ 2552 */
2604 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff; 2553 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2605 printk(KERN_INFO "%s: Typhoon 1.0 Sleep Image built " 2554 netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
2606 "%02u/%02u/2000\n", dev->name, monthday >> 8, 2555 monthday >> 8, monthday & 0xff);
2607 monthday & 0xff);
2608 } else if(xp_resp[0].numDesc == 2) { 2556 } else if(xp_resp[0].numDesc == 2) {
2609 /* This is the Typhoon 1.1+ type Sleep Image 2557 /* This is the Typhoon 1.1+ type Sleep Image
2610 */ 2558 */
2611 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2); 2559 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2612 u8 *ver_string = (u8 *) &xp_resp[1]; 2560 u8 *ver_string = (u8 *) &xp_resp[1];
2613 ver_string[25] = 0; 2561 ver_string[25] = 0;
2614 printk(KERN_INFO "%s: Typhoon 1.1+ Sleep Image version " 2562 netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
2615 "%02x.%03x.%03x %s\n", dev->name, sleep_ver >> 24, 2563 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
2616 (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff, 2564 sleep_ver & 0xfff, ver_string);
2617 ver_string);
2618 } else { 2565 } else {
2619 printk(KERN_WARNING "%s: Unknown Sleep Image version " 2566 netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
2620 "(%u:%04x)\n", dev->name, xp_resp[0].numDesc, 2567 xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
2621 le32_to_cpu(xp_resp[0].parm2));
2622 } 2568 }
2623 2569
2624 return 0; 2570 return 0;
@@ -2640,6 +2586,7 @@ error_out_disable:
2640error_out_dev: 2586error_out_dev:
2641 free_netdev(dev); 2587 free_netdev(dev);
2642error_out: 2588error_out:
2589 pr_err("%s: %s\n", pci_name(pdev), err_msg);
2643 return err; 2590 return err;
2644} 2591}
2645 2592
@@ -2664,7 +2611,7 @@ typhoon_remove_one(struct pci_dev *pdev)
2664} 2611}
2665 2612
2666static struct pci_driver typhoon_driver = { 2613static struct pci_driver typhoon_driver = {
2667 .name = DRV_MODULE_NAME, 2614 .name = KBUILD_MODNAME,
2668 .id_table = typhoon_pci_tbl, 2615 .id_table = typhoon_pci_tbl,
2669 .probe = typhoon_init_one, 2616 .probe = typhoon_init_one,
2670 .remove = __devexit_p(typhoon_remove_one), 2617 .remove = __devexit_p(typhoon_remove_one),
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index eb8fe7e16c6c..23a97518bc1f 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -37,6 +37,7 @@
37#include <asm/qe.h> 37#include <asm/qe.h>
38#include <asm/ucc.h> 38#include <asm/ucc.h>
39#include <asm/ucc_fast.h> 39#include <asm/ucc_fast.h>
40#include <asm/machdep.h>
40 41
41#include "ucc_geth.h" 42#include "ucc_geth.h"
42#include "fsl_pq_mdio.h" 43#include "fsl_pq_mdio.h"
@@ -1334,7 +1335,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1334 struct ucc_geth __iomem *ug_regs; 1335 struct ucc_geth __iomem *ug_regs;
1335 struct ucc_fast __iomem *uf_regs; 1336 struct ucc_fast __iomem *uf_regs;
1336 int ret_val; 1337 int ret_val;
1337 u32 upsmr, maccfg2, tbiBaseAddress; 1338 u32 upsmr, maccfg2;
1338 u16 value; 1339 u16 value;
1339 1340
1340 ugeth_vdbg("%s: IN", __func__); 1341 ugeth_vdbg("%s: IN", __func__);
@@ -1389,14 +1390,20 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1389 /* Note that this depends on proper setting in utbipar register. */ 1390 /* Note that this depends on proper setting in utbipar register. */
1390 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || 1391 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
1391 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { 1392 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1392 tbiBaseAddress = in_be32(&ug_regs->utbipar); 1393 struct ucc_geth_info *ug_info = ugeth->ug_info;
1393 tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK; 1394 struct phy_device *tbiphy;
1394 tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT; 1395
1395 value = ugeth->phydev->bus->read(ugeth->phydev->bus, 1396 if (!ug_info->tbi_node)
1396 (u8) tbiBaseAddress, ENET_TBI_MII_CR); 1397 ugeth_warn("TBI mode requires that the device "
1398 "tree specify a tbi-handle\n");
1399
1400 tbiphy = of_phy_find_device(ug_info->tbi_node);
1401 if (!tbiphy)
1402 ugeth_warn("Could not get TBI device\n");
1403
1404 value = phy_read(tbiphy, ENET_TBI_MII_CR);
1397 value &= ~0x1000; /* Turn off autonegotiation */ 1405 value &= ~0x1000; /* Turn off autonegotiation */
1398 ugeth->phydev->bus->write(ugeth->phydev->bus, 1406 phy_write(tbiphy, ENET_TBI_MII_CR, value);
1399 (u8) tbiBaseAddress, ENET_TBI_MII_CR, value);
1400 } 1407 }
1401 1408
1402 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); 1409 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
@@ -1995,7 +2002,6 @@ static void ucc_geth_set_multi(struct net_device *dev)
1995 struct dev_mc_list *dmi; 2002 struct dev_mc_list *dmi;
1996 struct ucc_fast __iomem *uf_regs; 2003 struct ucc_fast __iomem *uf_regs;
1997 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 2004 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
1998 int i;
1999 2005
2000 ugeth = netdev_priv(dev); 2006 ugeth = netdev_priv(dev);
2001 2007
@@ -2022,10 +2028,7 @@ static void ucc_geth_set_multi(struct net_device *dev)
2022 out_be32(&p_82xx_addr_filt->gaddr_h, 0x0); 2028 out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
2023 out_be32(&p_82xx_addr_filt->gaddr_l, 0x0); 2029 out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
2024 2030
2025 dmi = dev->mc_list; 2031 netdev_for_each_mc_addr(dmi, dev) {
2026
2027 for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
2028
2029 /* Only support group multicast for now. 2032 /* Only support group multicast for now.
2030 */ 2033 */
2031 if (!(dmi->dmi_addr[0] & 1)) 2034 if (!(dmi->dmi_addr[0] & 1))
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index a516185cbc9f..20e34608fa4a 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -184,8 +184,8 @@ static int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
184 void *buf; 184 void *buf;
185 int err = -ENOMEM; 185 int err = -ENOMEM;
186 186
187 devdbg(dev,"asix_read_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d", 187 netdev_dbg(dev->net, "asix_read_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
188 cmd, value, index, size); 188 cmd, value, index, size);
189 189
190 buf = kmalloc(size, GFP_KERNEL); 190 buf = kmalloc(size, GFP_KERNEL);
191 if (!buf) 191 if (!buf)
@@ -217,8 +217,8 @@ static int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
217 void *buf = NULL; 217 void *buf = NULL;
218 int err = -ENOMEM; 218 int err = -ENOMEM;
219 219
220 devdbg(dev,"asix_write_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d", 220 netdev_dbg(dev->net, "asix_write_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
221 cmd, value, index, size); 221 cmd, value, index, size);
222 222
223 if (data) { 223 if (data) {
224 buf = kmalloc(size, GFP_KERNEL); 224 buf = kmalloc(size, GFP_KERNEL);
@@ -264,15 +264,15 @@ asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
264 int status; 264 int status;
265 struct urb *urb; 265 struct urb *urb;
266 266
267 devdbg(dev,"asix_write_cmd_async() cmd=0x%02x value=0x%04x index=0x%04x size=%d", 267 netdev_dbg(dev->net, "asix_write_cmd_async() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
268 cmd, value, index, size); 268 cmd, value, index, size);
269 if ((urb = usb_alloc_urb(0, GFP_ATOMIC)) == NULL) { 269 if ((urb = usb_alloc_urb(0, GFP_ATOMIC)) == NULL) {
270 deverr(dev, "Error allocating URB in write_cmd_async!"); 270 netdev_err(dev->net, "Error allocating URB in write_cmd_async!\n");
271 return; 271 return;
272 } 272 }
273 273
274 if ((req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC)) == NULL) { 274 if ((req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC)) == NULL) {
275 deverr(dev, "Failed to allocate memory for control request"); 275 netdev_err(dev->net, "Failed to allocate memory for control request\n");
276 usb_free_urb(urb); 276 usb_free_urb(urb);
277 return; 277 return;
278 } 278 }
@@ -289,8 +289,8 @@ asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
289 asix_async_cmd_callback, req); 289 asix_async_cmd_callback, req);
290 290
291 if((status = usb_submit_urb(urb, GFP_ATOMIC)) < 0) { 291 if((status = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
292 deverr(dev, "Error submitting the control message: status=%d", 292 netdev_err(dev->net, "Error submitting the control message: status=%d\n",
293 status); 293 status);
294 kfree(req); 294 kfree(req);
295 usb_free_urb(urb); 295 usb_free_urb(urb);
296 } 296 }
@@ -314,7 +314,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
314 while (skb->len > 0) { 314 while (skb->len > 0) {
315 if ((short)(header & 0x0000ffff) != 315 if ((short)(header & 0x0000ffff) !=
316 ~((short)((header & 0xffff0000) >> 16))) { 316 ~((short)((header & 0xffff0000) >> 16))) {
317 deverr(dev,"asix_rx_fixup() Bad Header Length"); 317 netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
318 } 318 }
319 /* get the packet length */ 319 /* get the packet length */
320 size = (u16) (header & 0x0000ffff); 320 size = (u16) (header & 0x0000ffff);
@@ -322,7 +322,8 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
322 if ((skb->len) - ((size + 1) & 0xfffe) == 0) 322 if ((skb->len) - ((size + 1) & 0xfffe) == 0)
323 return 2; 323 return 2;
324 if (size > ETH_FRAME_LEN) { 324 if (size > ETH_FRAME_LEN) {
325 deverr(dev,"asix_rx_fixup() Bad RX Length %d", size); 325 netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
326 size);
326 return 0; 327 return 0;
327 } 328 }
328 ax_skb = skb_clone(skb, GFP_ATOMIC); 329 ax_skb = skb_clone(skb, GFP_ATOMIC);
@@ -348,7 +349,8 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
348 } 349 }
349 350
350 if (skb->len < 0) { 351 if (skb->len < 0) {
351 deverr(dev,"asix_rx_fixup() Bad SKB Length %d", skb->len); 352 netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d\n",
353 skb->len);
352 return 0; 354 return 0;
353 } 355 }
354 return 1; 356 return 1;
@@ -409,7 +411,7 @@ static void asix_status(struct usbnet *dev, struct urb *urb)
409 usbnet_defer_kevent (dev, EVENT_LINK_RESET ); 411 usbnet_defer_kevent (dev, EVENT_LINK_RESET );
410 } else 412 } else
411 netif_carrier_off(dev->net); 413 netif_carrier_off(dev->net);
412 devdbg(dev, "Link Status is: %d", link); 414 netdev_dbg(dev->net, "Link Status is: %d\n", link);
413 } 415 }
414} 416}
415 417
@@ -418,7 +420,7 @@ static inline int asix_set_sw_mii(struct usbnet *dev)
418 int ret; 420 int ret;
419 ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL); 421 ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL);
420 if (ret < 0) 422 if (ret < 0)
421 deverr(dev, "Failed to enable software MII access"); 423 netdev_err(dev->net, "Failed to enable software MII access\n");
422 return ret; 424 return ret;
423} 425}
424 426
@@ -427,7 +429,7 @@ static inline int asix_set_hw_mii(struct usbnet *dev)
427 int ret; 429 int ret;
428 ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL); 430 ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL);
429 if (ret < 0) 431 if (ret < 0)
430 deverr(dev, "Failed to enable hardware MII access"); 432 netdev_err(dev->net, "Failed to enable hardware MII access\n");
431 return ret; 433 return ret;
432} 434}
433 435
@@ -436,13 +438,14 @@ static inline int asix_get_phy_addr(struct usbnet *dev)
436 u8 buf[2]; 438 u8 buf[2];
437 int ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID, 0, 0, 2, buf); 439 int ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID, 0, 0, 2, buf);
438 440
439 devdbg(dev, "asix_get_phy_addr()"); 441 netdev_dbg(dev->net, "asix_get_phy_addr()\n");
440 442
441 if (ret < 0) { 443 if (ret < 0) {
442 deverr(dev, "Error reading PHYID register: %02x", ret); 444 netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
443 goto out; 445 goto out;
444 } 446 }
445 devdbg(dev, "asix_get_phy_addr() returning 0x%04x", *((__le16 *)buf)); 447 netdev_dbg(dev->net, "asix_get_phy_addr() returning 0x%04x\n",
448 *((__le16 *)buf));
446 ret = buf[1]; 449 ret = buf[1];
447 450
448out: 451out:
@@ -455,7 +458,7 @@ static int asix_sw_reset(struct usbnet *dev, u8 flags)
455 458
456 ret = asix_write_cmd(dev, AX_CMD_SW_RESET, flags, 0, 0, NULL); 459 ret = asix_write_cmd(dev, AX_CMD_SW_RESET, flags, 0, 0, NULL);
457 if (ret < 0) 460 if (ret < 0)
458 deverr(dev,"Failed to send software reset: %02x", ret); 461 netdev_err(dev->net, "Failed to send software reset: %02x\n", ret);
459 462
460 return ret; 463 return ret;
461} 464}
@@ -466,7 +469,7 @@ static u16 asix_read_rx_ctl(struct usbnet *dev)
466 int ret = asix_read_cmd(dev, AX_CMD_READ_RX_CTL, 0, 0, 2, &v); 469 int ret = asix_read_cmd(dev, AX_CMD_READ_RX_CTL, 0, 0, 2, &v);
467 470
468 if (ret < 0) { 471 if (ret < 0) {
469 deverr(dev, "Error reading RX_CTL register: %02x", ret); 472 netdev_err(dev->net, "Error reading RX_CTL register: %02x\n", ret);
470 goto out; 473 goto out;
471 } 474 }
472 ret = le16_to_cpu(v); 475 ret = le16_to_cpu(v);
@@ -478,11 +481,11 @@ static int asix_write_rx_ctl(struct usbnet *dev, u16 mode)
478{ 481{
479 int ret; 482 int ret;
480 483
481 devdbg(dev,"asix_write_rx_ctl() - mode = 0x%04x", mode); 484 netdev_dbg(dev->net, "asix_write_rx_ctl() - mode = 0x%04x\n", mode);
482 ret = asix_write_cmd(dev, AX_CMD_WRITE_RX_CTL, mode, 0, 0, NULL); 485 ret = asix_write_cmd(dev, AX_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
483 if (ret < 0) 486 if (ret < 0)
484 deverr(dev, "Failed to write RX_CTL mode to 0x%04x: %02x", 487 netdev_err(dev->net, "Failed to write RX_CTL mode to 0x%04x: %02x\n",
485 mode, ret); 488 mode, ret);
486 489
487 return ret; 490 return ret;
488} 491}
@@ -493,7 +496,8 @@ static u16 asix_read_medium_status(struct usbnet *dev)
493 int ret = asix_read_cmd(dev, AX_CMD_READ_MEDIUM_STATUS, 0, 0, 2, &v); 496 int ret = asix_read_cmd(dev, AX_CMD_READ_MEDIUM_STATUS, 0, 0, 2, &v);
494 497
495 if (ret < 0) { 498 if (ret < 0) {
496 deverr(dev, "Error reading Medium Status register: %02x", ret); 499 netdev_err(dev->net, "Error reading Medium Status register: %02x\n",
500 ret);
497 goto out; 501 goto out;
498 } 502 }
499 ret = le16_to_cpu(v); 503 ret = le16_to_cpu(v);
@@ -505,11 +509,11 @@ static int asix_write_medium_mode(struct usbnet *dev, u16 mode)
505{ 509{
506 int ret; 510 int ret;
507 511
508 devdbg(dev,"asix_write_medium_mode() - mode = 0x%04x", mode); 512 netdev_dbg(dev->net, "asix_write_medium_mode() - mode = 0x%04x\n", mode);
509 ret = asix_write_cmd(dev, AX_CMD_WRITE_MEDIUM_MODE, mode, 0, 0, NULL); 513 ret = asix_write_cmd(dev, AX_CMD_WRITE_MEDIUM_MODE, mode, 0, 0, NULL);
510 if (ret < 0) 514 if (ret < 0)
511 deverr(dev, "Failed to write Medium Mode mode to 0x%04x: %02x", 515 netdev_err(dev->net, "Failed to write Medium Mode mode to 0x%04x: %02x\n",
512 mode, ret); 516 mode, ret);
513 517
514 return ret; 518 return ret;
515} 519}
@@ -518,11 +522,11 @@ static int asix_write_gpio(struct usbnet *dev, u16 value, int sleep)
518{ 522{
519 int ret; 523 int ret;
520 524
521 devdbg(dev,"asix_write_gpio() - value = 0x%04x", value); 525 netdev_dbg(dev->net, "asix_write_gpio() - value = 0x%04x\n", value);
522 ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS, value, 0, 0, NULL); 526 ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS, value, 0, 0, NULL);
523 if (ret < 0) 527 if (ret < 0)
524 deverr(dev, "Failed to write GPIO value 0x%04x: %02x", 528 netdev_err(dev->net, "Failed to write GPIO value 0x%04x: %02x\n",
525 value, ret); 529 value, ret);
526 530
527 if (sleep) 531 if (sleep)
528 msleep(sleep); 532 msleep(sleep);
@@ -542,29 +546,27 @@ static void asix_set_multicast(struct net_device *net)
542 if (net->flags & IFF_PROMISC) { 546 if (net->flags & IFF_PROMISC) {
543 rx_ctl |= AX_RX_CTL_PRO; 547 rx_ctl |= AX_RX_CTL_PRO;
544 } else if (net->flags & IFF_ALLMULTI || 548 } else if (net->flags & IFF_ALLMULTI ||
545 net->mc_count > AX_MAX_MCAST) { 549 netdev_mc_count(net) > AX_MAX_MCAST) {
546 rx_ctl |= AX_RX_CTL_AMALL; 550 rx_ctl |= AX_RX_CTL_AMALL;
547 } else if (net->mc_count == 0) { 551 } else if (netdev_mc_empty(net)) {
548 /* just broadcast and directed */ 552 /* just broadcast and directed */
549 } else { 553 } else {
550 /* We use the 20 byte dev->data 554 /* We use the 20 byte dev->data
551 * for our 8 byte filter buffer 555 * for our 8 byte filter buffer
552 * to avoid allocating memory that 556 * to avoid allocating memory that
553 * is tricky to free later */ 557 * is tricky to free later */
554 struct dev_mc_list *mc_list = net->mc_list; 558 struct dev_mc_list *mc_list;
555 u32 crc_bits; 559 u32 crc_bits;
556 int i;
557 560
558 memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE); 561 memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
559 562
560 /* Build the multicast hash filter. */ 563 /* Build the multicast hash filter. */
561 for (i = 0; i < net->mc_count; i++) { 564 netdev_for_each_mc_addr(mc_list, net) {
562 crc_bits = 565 crc_bits =
563 ether_crc(ETH_ALEN, 566 ether_crc(ETH_ALEN,
564 mc_list->dmi_addr) >> 26; 567 mc_list->dmi_addr) >> 26;
565 data->multi_filter[crc_bits >> 3] |= 568 data->multi_filter[crc_bits >> 3] |=
566 1 << (crc_bits & 7); 569 1 << (crc_bits & 7);
567 mc_list = mc_list->next;
568 } 570 }
569 571
570 asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0, 572 asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
@@ -588,7 +590,8 @@ static int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
588 asix_set_hw_mii(dev); 590 asix_set_hw_mii(dev);
589 mutex_unlock(&dev->phy_mutex); 591 mutex_unlock(&dev->phy_mutex);
590 592
591 devdbg(dev, "asix_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x", phy_id, loc, le16_to_cpu(res)); 593 netdev_dbg(dev->net, "asix_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
594 phy_id, loc, le16_to_cpu(res));
592 595
593 return le16_to_cpu(res); 596 return le16_to_cpu(res);
594} 597}
@@ -599,7 +602,8 @@ asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
599 struct usbnet *dev = netdev_priv(netdev); 602 struct usbnet *dev = netdev_priv(netdev);
600 __le16 res = cpu_to_le16(val); 603 __le16 res = cpu_to_le16(val);
601 604
602 devdbg(dev, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x", phy_id, loc, val); 605 netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
606 phy_id, loc, val);
603 mutex_lock(&dev->phy_mutex); 607 mutex_lock(&dev->phy_mutex);
604 asix_set_sw_mii(dev); 608 asix_set_sw_mii(dev);
605 asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2, &res); 609 asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2, &res);
@@ -754,29 +758,27 @@ static void ax88172_set_multicast(struct net_device *net)
754 if (net->flags & IFF_PROMISC) { 758 if (net->flags & IFF_PROMISC) {
755 rx_ctl |= 0x01; 759 rx_ctl |= 0x01;
756 } else if (net->flags & IFF_ALLMULTI || 760 } else if (net->flags & IFF_ALLMULTI ||
757 net->mc_count > AX_MAX_MCAST) { 761 netdev_mc_count(net) > AX_MAX_MCAST) {
758 rx_ctl |= 0x02; 762 rx_ctl |= 0x02;
759 } else if (net->mc_count == 0) { 763 } else if (netdev_mc_empty(net)) {
760 /* just broadcast and directed */ 764 /* just broadcast and directed */
761 } else { 765 } else {
762 /* We use the 20 byte dev->data 766 /* We use the 20 byte dev->data
763 * for our 8 byte filter buffer 767 * for our 8 byte filter buffer
764 * to avoid allocating memory that 768 * to avoid allocating memory that
765 * is tricky to free later */ 769 * is tricky to free later */
766 struct dev_mc_list *mc_list = net->mc_list; 770 struct dev_mc_list *mc_list;
767 u32 crc_bits; 771 u32 crc_bits;
768 int i;
769 772
770 memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE); 773 memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
771 774
772 /* Build the multicast hash filter. */ 775 /* Build the multicast hash filter. */
773 for (i = 0; i < net->mc_count; i++) { 776 netdev_for_each_mc_addr(mc_list, net) {
774 crc_bits = 777 crc_bits =
775 ether_crc(ETH_ALEN, 778 ether_crc(ETH_ALEN,
776 mc_list->dmi_addr) >> 26; 779 mc_list->dmi_addr) >> 26;
777 data->multi_filter[crc_bits >> 3] |= 780 data->multi_filter[crc_bits >> 3] |=
778 1 << (crc_bits & 7); 781 1 << (crc_bits & 7);
779 mc_list = mc_list->next;
780 } 782 }
781 783
782 asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0, 784 asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
@@ -800,7 +802,8 @@ static int ax88172_link_reset(struct usbnet *dev)
800 if (ecmd.duplex != DUPLEX_FULL) 802 if (ecmd.duplex != DUPLEX_FULL)
801 mode |= ~AX88172_MEDIUM_FD; 803 mode |= ~AX88172_MEDIUM_FD;
802 804
803 devdbg(dev, "ax88172_link_reset() speed: %d duplex: %d setting mode to 0x%04x", ecmd.speed, ecmd.duplex, mode); 805 netdev_dbg(dev->net, "ax88172_link_reset() speed: %d duplex: %d setting mode to 0x%04x\n",
806 ecmd.speed, ecmd.duplex, mode);
804 807
805 asix_write_medium_mode(dev, mode); 808 asix_write_medium_mode(dev, mode);
806 809
@@ -902,7 +905,8 @@ static int ax88772_link_reset(struct usbnet *dev)
902 if (ecmd.duplex != DUPLEX_FULL) 905 if (ecmd.duplex != DUPLEX_FULL)
903 mode &= ~AX_MEDIUM_FD; 906 mode &= ~AX_MEDIUM_FD;
904 907
905 devdbg(dev, "ax88772_link_reset() speed: %d duplex: %d setting mode to 0x%04x", ecmd.speed, ecmd.duplex, mode); 908 netdev_dbg(dev->net, "ax88772_link_reset() speed: %d duplex: %d setting mode to 0x%04x\n",
909 ecmd.speed, ecmd.duplex, mode);
906 910
907 asix_write_medium_mode(dev, mode); 911 asix_write_medium_mode(dev, mode);
908 912
@@ -1059,10 +1063,10 @@ static int marvell_phy_init(struct usbnet *dev)
1059 struct asix_data *data = (struct asix_data *)&dev->data; 1063 struct asix_data *data = (struct asix_data *)&dev->data;
1060 u16 reg; 1064 u16 reg;
1061 1065
1062 devdbg(dev,"marvell_phy_init()"); 1066 netdev_dbg(dev->net, "marvell_phy_init()\n");
1063 1067
1064 reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_MARVELL_STATUS); 1068 reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_MARVELL_STATUS);
1065 devdbg(dev,"MII_MARVELL_STATUS = 0x%04x", reg); 1069 netdev_dbg(dev->net, "MII_MARVELL_STATUS = 0x%04x\n", reg);
1066 1070
1067 asix_mdio_write(dev->net, dev->mii.phy_id, MII_MARVELL_CTRL, 1071 asix_mdio_write(dev->net, dev->mii.phy_id, MII_MARVELL_CTRL,
1068 MARVELL_CTRL_RXDELAY | MARVELL_CTRL_TXDELAY); 1072 MARVELL_CTRL_RXDELAY | MARVELL_CTRL_TXDELAY);
@@ -1070,7 +1074,7 @@ static int marvell_phy_init(struct usbnet *dev)
1070 if (data->ledmode) { 1074 if (data->ledmode) {
1071 reg = asix_mdio_read(dev->net, dev->mii.phy_id, 1075 reg = asix_mdio_read(dev->net, dev->mii.phy_id,
1072 MII_MARVELL_LED_CTRL); 1076 MII_MARVELL_LED_CTRL);
1073 devdbg(dev,"MII_MARVELL_LED_CTRL (1) = 0x%04x", reg); 1077 netdev_dbg(dev->net, "MII_MARVELL_LED_CTRL (1) = 0x%04x\n", reg);
1074 1078
1075 reg &= 0xf8ff; 1079 reg &= 0xf8ff;
1076 reg |= (1 + 0x0100); 1080 reg |= (1 + 0x0100);
@@ -1079,7 +1083,7 @@ static int marvell_phy_init(struct usbnet *dev)
1079 1083
1080 reg = asix_mdio_read(dev->net, dev->mii.phy_id, 1084 reg = asix_mdio_read(dev->net, dev->mii.phy_id,
1081 MII_MARVELL_LED_CTRL); 1085 MII_MARVELL_LED_CTRL);
1082 devdbg(dev,"MII_MARVELL_LED_CTRL (2) = 0x%04x", reg); 1086 netdev_dbg(dev->net, "MII_MARVELL_LED_CTRL (2) = 0x%04x\n", reg);
1083 reg &= 0xfc0f; 1087 reg &= 0xfc0f;
1084 } 1088 }
1085 1089
@@ -1090,7 +1094,7 @@ static int marvell_led_status(struct usbnet *dev, u16 speed)
1090{ 1094{
1091 u16 reg = asix_mdio_read(dev->net, dev->mii.phy_id, MARVELL_LED_MANUAL); 1095 u16 reg = asix_mdio_read(dev->net, dev->mii.phy_id, MARVELL_LED_MANUAL);
1092 1096
1093 devdbg(dev, "marvell_led_status() read 0x%04x", reg); 1097 netdev_dbg(dev->net, "marvell_led_status() read 0x%04x\n", reg);
1094 1098
1095 /* Clear out the center LED bits - 0x03F0 */ 1099 /* Clear out the center LED bits - 0x03F0 */
1096 reg &= 0xfc0f; 1100 reg &= 0xfc0f;
@@ -1106,7 +1110,7 @@ static int marvell_led_status(struct usbnet *dev, u16 speed)
1106 reg |= 0x02f0; 1110 reg |= 0x02f0;
1107 } 1111 }
1108 1112
1109 devdbg(dev, "marvell_led_status() writing 0x%04x", reg); 1113 netdev_dbg(dev->net, "marvell_led_status() writing 0x%04x\n", reg);
1110 asix_mdio_write(dev->net, dev->mii.phy_id, MARVELL_LED_MANUAL, reg); 1114 asix_mdio_write(dev->net, dev->mii.phy_id, MARVELL_LED_MANUAL, reg);
1111 1115
1112 return 0; 1116 return 0;
@@ -1118,7 +1122,7 @@ static int ax88178_link_reset(struct usbnet *dev)
1118 struct ethtool_cmd ecmd; 1122 struct ethtool_cmd ecmd;
1119 struct asix_data *data = (struct asix_data *)&dev->data; 1123 struct asix_data *data = (struct asix_data *)&dev->data;
1120 1124
1121 devdbg(dev,"ax88178_link_reset()"); 1125 netdev_dbg(dev->net, "ax88178_link_reset()\n");
1122 1126
1123 mii_check_media(&dev->mii, 1, 1); 1127 mii_check_media(&dev->mii, 1, 1);
1124 mii_ethtool_gset(&dev->mii, &ecmd); 1128 mii_ethtool_gset(&dev->mii, &ecmd);
@@ -1138,7 +1142,8 @@ static int ax88178_link_reset(struct usbnet *dev)
1138 else 1142 else
1139 mode &= ~AX_MEDIUM_FD; 1143 mode &= ~AX_MEDIUM_FD;
1140 1144
1141 devdbg(dev, "ax88178_link_reset() speed: %d duplex: %d setting mode to 0x%04x", ecmd.speed, ecmd.duplex, mode); 1145 netdev_dbg(dev->net, "ax88178_link_reset() speed: %d duplex: %d setting mode to 0x%04x\n",
1146 ecmd.speed, ecmd.duplex, mode);
1142 1147
1143 asix_write_medium_mode(dev, mode); 1148 asix_write_medium_mode(dev, mode);
1144 1149
@@ -1188,7 +1193,7 @@ static int ax88178_change_mtu(struct net_device *net, int new_mtu)
1188 struct usbnet *dev = netdev_priv(net); 1193 struct usbnet *dev = netdev_priv(net);
1189 int ll_mtu = new_mtu + net->hard_header_len + 4; 1194 int ll_mtu = new_mtu + net->hard_header_len + 4;
1190 1195
1191 devdbg(dev, "ax88178_change_mtu() new_mtu=%d", new_mtu); 1196 netdev_dbg(dev->net, "ax88178_change_mtu() new_mtu=%d\n", new_mtu);
1192 1197
1193 if (new_mtu <= 0 || ll_mtu > 16384) 1198 if (new_mtu <= 0 || ll_mtu > 16384)
1194 return -EINVAL; 1199 return -EINVAL;
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 22b87e64a810..96f1ebe0d348 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -632,7 +632,6 @@ static void catc_set_multicast_list(struct net_device *netdev)
632 struct dev_mc_list *mc; 632 struct dev_mc_list *mc;
633 u8 broadcast[6]; 633 u8 broadcast[6];
634 u8 rx = RxEnable | RxPolarity | RxMultiCast; 634 u8 rx = RxEnable | RxPolarity | RxMultiCast;
635 int i;
636 635
637 memset(broadcast, 0xff, 6); 636 memset(broadcast, 0xff, 6);
638 memset(catc->multicast, 0, 64); 637 memset(catc->multicast, 0, 64);
@@ -648,7 +647,7 @@ static void catc_set_multicast_list(struct net_device *netdev)
648 if (netdev->flags & IFF_ALLMULTI) { 647 if (netdev->flags & IFF_ALLMULTI) {
649 memset(catc->multicast, 0xff, 64); 648 memset(catc->multicast, 0xff, 64);
650 } else { 649 } else {
651 for (i = 0, mc = netdev->mc_list; mc && i < netdev->mc_count; i++, mc = mc->next) { 650 netdev_for_each_mc_addr(mc, netdev) {
652 u32 crc = ether_crc_le(6, mc->dmi_addr); 651 u32 crc = ether_crc_le(6, mc->dmi_addr);
653 if (!catc->is_f5u011) { 652 if (!catc->is_f5u011) {
654 catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7); 653 catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
@@ -897,11 +896,9 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
897 f5u011_rxmode(catc, catc->rxmode); 896 f5u011_rxmode(catc, catc->rxmode);
898 } 897 }
899 dbg("Init done."); 898 dbg("Init done.");
900 printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, ", 899 printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n",
901 netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate", 900 netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate",
902 usbdev->bus->bus_name, usbdev->devpath); 901 usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr);
903 for (i = 0; i < 5; i++) printk("%2.2x:", netdev->dev_addr[i]);
904 printk("%2.2x.\n", netdev->dev_addr[i]);
905 usb_set_intfdata(intf, catc); 902 usb_set_intfdata(intf, catc);
906 903
907 SET_NETDEV_DEV(netdev, &intf->dev); 904 SET_NETDEV_DEV(netdev, &intf->dev);
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index c337ffc3304a..a4a85a6ed86d 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -73,7 +73,7 @@ static void eem_linkcmd(struct usbnet *dev, struct sk_buff *skb)
73 usb_free_urb(urb); 73 usb_free_urb(urb);
74fail: 74fail:
75 dev_kfree_skb(skb); 75 dev_kfree_skb(skb);
76 devwarn(dev, "link cmd failure\n"); 76 netdev_warn(dev->net, "link cmd failure\n");
77 return; 77 return;
78 } 78 }
79} 79}
@@ -212,7 +212,8 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
212 * b15: 1 (EEM command) 212 * b15: 1 (EEM command)
213 */ 213 */
214 if (header & BIT(14)) { 214 if (header & BIT(14)) {
215 devdbg(dev, "reserved command %04x\n", header); 215 netdev_dbg(dev->net, "reserved command %04x\n",
216 header);
216 continue; 217 continue;
217 } 218 }
218 219
@@ -255,8 +256,9 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
255 case 1: /* Echo response */ 256 case 1: /* Echo response */
256 case 5: /* Tickle */ 257 case 5: /* Tickle */
257 default: /* reserved */ 258 default: /* reserved */
258 devwarn(dev, "unexpected link command %d\n", 259 netdev_warn(dev->net,
259 bmEEMCmd); 260 "unexpected link command %d\n",
261 bmEEMCmd);
260 continue; 262 continue;
261 } 263 }
262 264
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 5f3b9eaeb04f..c8cdb7f30adc 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -339,10 +339,10 @@ EXPORT_SYMBOL_GPL(usbnet_cdc_unbind);
339 339
340static void dumpspeed(struct usbnet *dev, __le32 *speeds) 340static void dumpspeed(struct usbnet *dev, __le32 *speeds)
341{ 341{
342 if (netif_msg_timer(dev)) 342 netif_info(dev, timer, dev->net,
343 devinfo(dev, "link speeds: %u kbps up, %u kbps down", 343 "link speeds: %u kbps up, %u kbps down\n",
344 __le32_to_cpu(speeds[0]) / 1000, 344 __le32_to_cpu(speeds[0]) / 1000,
345 __le32_to_cpu(speeds[1]) / 1000); 345 __le32_to_cpu(speeds[1]) / 1000);
346} 346}
347 347
348static void cdc_status(struct usbnet *dev, struct urb *urb) 348static void cdc_status(struct usbnet *dev, struct urb *urb)
@@ -361,18 +361,16 @@ static void cdc_status(struct usbnet *dev, struct urb *urb)
361 event = urb->transfer_buffer; 361 event = urb->transfer_buffer;
362 switch (event->bNotificationType) { 362 switch (event->bNotificationType) {
363 case USB_CDC_NOTIFY_NETWORK_CONNECTION: 363 case USB_CDC_NOTIFY_NETWORK_CONNECTION:
364 if (netif_msg_timer(dev)) 364 netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n",
365 devdbg(dev, "CDC: carrier %s", 365 event->wValue ? "on" : "off");
366 event->wValue ? "on" : "off");
367 if (event->wValue) 366 if (event->wValue)
368 netif_carrier_on(dev->net); 367 netif_carrier_on(dev->net);
369 else 368 else
370 netif_carrier_off(dev->net); 369 netif_carrier_off(dev->net);
371 break; 370 break;
372 case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */ 371 case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */
373 if (netif_msg_timer(dev)) 372 netif_dbg(dev, timer, dev->net, "CDC: speed change (len %d)\n",
374 devdbg(dev, "CDC: speed change (len %d)", 373 urb->actual_length);
375 urb->actual_length);
376 if (urb->actual_length != (sizeof *event + 8)) 374 if (urb->actual_length != (sizeof *event + 8))
377 set_bit(EVENT_STS_SPLIT, &dev->flags); 375 set_bit(EVENT_STS_SPLIT, &dev->flags);
378 else 376 else
@@ -382,8 +380,8 @@ static void cdc_status(struct usbnet *dev, struct urb *urb)
382 * but there are no standard formats for the response data. 380 * but there are no standard formats for the response data.
383 */ 381 */
384 default: 382 default:
385 deverr(dev, "CDC: unexpected notification %02x!", 383 netdev_err(dev->net, "CDC: unexpected notification %02x!\n",
386 event->bNotificationType); 384 event->bNotificationType);
387 break; 385 break;
388 } 386 }
389} 387}
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 3d406f9b2f29..269339769f47 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -58,7 +58,7 @@ static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data)
58 void *buf; 58 void *buf;
59 int err = -ENOMEM; 59 int err = -ENOMEM;
60 60
61 devdbg(dev, "dm_read() reg=0x%02x length=%d", reg, length); 61 netdev_dbg(dev->net, "dm_read() reg=0x%02x length=%d\n", reg, length);
62 62
63 buf = kmalloc(length, GFP_KERNEL); 63 buf = kmalloc(length, GFP_KERNEL);
64 if (!buf) 64 if (!buf)
@@ -89,7 +89,7 @@ static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
89 void *buf = NULL; 89 void *buf = NULL;
90 int err = -ENOMEM; 90 int err = -ENOMEM;
91 91
92 devdbg(dev, "dm_write() reg=0x%02x, length=%d", reg, length); 92 netdev_dbg(dev->net, "dm_write() reg=0x%02x, length=%d\n", reg, length);
93 93
94 if (data) { 94 if (data) {
95 buf = kmalloc(length, GFP_KERNEL); 95 buf = kmalloc(length, GFP_KERNEL);
@@ -112,7 +112,8 @@ static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
112 112
113static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value) 113static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
114{ 114{
115 devdbg(dev, "dm_write_reg() reg=0x%02x, value=0x%02x", reg, value); 115 netdev_dbg(dev->net, "dm_write_reg() reg=0x%02x, value=0x%02x\n",
116 reg, value);
116 return usb_control_msg(dev->udev, 117 return usb_control_msg(dev->udev,
117 usb_sndctrlpipe(dev->udev, 0), 118 usb_sndctrlpipe(dev->udev, 0),
118 DM_WRITE_REG, 119 DM_WRITE_REG,
@@ -142,13 +143,13 @@ static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value,
142 143
143 urb = usb_alloc_urb(0, GFP_ATOMIC); 144 urb = usb_alloc_urb(0, GFP_ATOMIC);
144 if (!urb) { 145 if (!urb) {
145 deverr(dev, "Error allocating URB in dm_write_async_helper!"); 146 netdev_err(dev->net, "Error allocating URB in dm_write_async_helper!\n");
146 return; 147 return;
147 } 148 }
148 149
149 req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC); 150 req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
150 if (!req) { 151 if (!req) {
151 deverr(dev, "Failed to allocate memory for control request"); 152 netdev_err(dev->net, "Failed to allocate memory for control request\n");
152 usb_free_urb(urb); 153 usb_free_urb(urb);
153 return; 154 return;
154 } 155 }
@@ -166,8 +167,8 @@ static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value,
166 167
167 status = usb_submit_urb(urb, GFP_ATOMIC); 168 status = usb_submit_urb(urb, GFP_ATOMIC);
168 if (status < 0) { 169 if (status < 0) {
169 deverr(dev, "Error submitting the control message: status=%d", 170 netdev_err(dev->net, "Error submitting the control message: status=%d\n",
170 status); 171 status);
171 kfree(req); 172 kfree(req);
172 usb_free_urb(urb); 173 usb_free_urb(urb);
173 } 174 }
@@ -175,15 +176,15 @@ static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value,
175 176
176static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data) 177static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
177{ 178{
178 devdbg(dev, "dm_write_async() reg=0x%02x length=%d", reg, length); 179 netdev_dbg(dev->net, "dm_write_async() reg=0x%02x length=%d\n", reg, length);
179 180
180 dm_write_async_helper(dev, reg, 0, length, data); 181 dm_write_async_helper(dev, reg, 0, length, data);
181} 182}
182 183
183static void dm_write_reg_async(struct usbnet *dev, u8 reg, u8 value) 184static void dm_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
184{ 185{
185 devdbg(dev, "dm_write_reg_async() reg=0x%02x value=0x%02x", 186 netdev_dbg(dev->net, "dm_write_reg_async() reg=0x%02x value=0x%02x\n",
186 reg, value); 187 reg, value);
187 188
188 dm_write_async_helper(dev, reg, value, 0, NULL); 189 dm_write_async_helper(dev, reg, value, 0, NULL);
189} 190}
@@ -211,7 +212,7 @@ static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *valu
211 } 212 }
212 213
213 if (i == DM_TIMEOUT) { 214 if (i == DM_TIMEOUT) {
214 deverr(dev, "%s read timed out!", phy ? "phy" : "eeprom"); 215 netdev_err(dev->net, "%s read timed out!\n", phy ? "phy" : "eeprom");
215 ret = -EIO; 216 ret = -EIO;
216 goto out; 217 goto out;
217 } 218 }
@@ -219,8 +220,8 @@ static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *valu
219 dm_write_reg(dev, DM_SHARED_CTRL, 0x0); 220 dm_write_reg(dev, DM_SHARED_CTRL, 0x0);
220 ret = dm_read(dev, DM_SHARED_DATA, 2, value); 221 ret = dm_read(dev, DM_SHARED_DATA, 2, value);
221 222
222 devdbg(dev, "read shared %d 0x%02x returned 0x%04x, %d", 223 netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n",
223 phy, reg, *value, ret); 224 phy, reg, *value, ret);
224 225
225 out: 226 out:
226 mutex_unlock(&dev->phy_mutex); 227 mutex_unlock(&dev->phy_mutex);
@@ -254,7 +255,7 @@ static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 valu
254 } 255 }
255 256
256 if (i == DM_TIMEOUT) { 257 if (i == DM_TIMEOUT) {
257 deverr(dev, "%s write timed out!", phy ? "phy" : "eeprom"); 258 netdev_err(dev->net, "%s write timed out!\n", phy ? "phy" : "eeprom");
258 ret = -EIO; 259 ret = -EIO;
259 goto out; 260 goto out;
260 } 261 }
@@ -304,15 +305,15 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
304 __le16 res; 305 __le16 res;
305 306
306 if (phy_id) { 307 if (phy_id) {
307 devdbg(dev, "Only internal phy supported"); 308 netdev_dbg(dev->net, "Only internal phy supported\n");
308 return 0; 309 return 0;
309 } 310 }
310 311
311 dm_read_shared_word(dev, 1, loc, &res); 312 dm_read_shared_word(dev, 1, loc, &res);
312 313
313 devdbg(dev, 314 netdev_dbg(dev->net,
314 "dm9601_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x", 315 "dm9601_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
315 phy_id, loc, le16_to_cpu(res)); 316 phy_id, loc, le16_to_cpu(res));
316 317
317 return le16_to_cpu(res); 318 return le16_to_cpu(res);
318} 319}
@@ -324,12 +325,12 @@ static void dm9601_mdio_write(struct net_device *netdev, int phy_id, int loc,
324 __le16 res = cpu_to_le16(val); 325 __le16 res = cpu_to_le16(val);
325 326
326 if (phy_id) { 327 if (phy_id) {
327 devdbg(dev, "Only internal phy supported"); 328 netdev_dbg(dev->net, "Only internal phy supported\n");
328 return; 329 return;
329 } 330 }
330 331
331 devdbg(dev,"dm9601_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x", 332 netdev_dbg(dev->net, "dm9601_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
332 phy_id, loc, val); 333 phy_id, loc, val);
333 334
334 dm_write_shared_word(dev, 1, loc, res); 335 dm_write_shared_word(dev, 1, loc, res);
335} 336}
@@ -381,13 +382,13 @@ static void dm9601_set_multicast(struct net_device *net)
381 382
382 if (net->flags & IFF_PROMISC) { 383 if (net->flags & IFF_PROMISC) {
383 rx_ctl |= 0x02; 384 rx_ctl |= 0x02;
384 } else if (net->flags & IFF_ALLMULTI || net->mc_count > DM_MAX_MCAST) { 385 } else if (net->flags & IFF_ALLMULTI ||
386 netdev_mc_count(net) > DM_MAX_MCAST) {
385 rx_ctl |= 0x04; 387 rx_ctl |= 0x04;
386 } else if (net->mc_count) { 388 } else if (!netdev_mc_empty(net)) {
387 struct dev_mc_list *mc_list = net->mc_list; 389 struct dev_mc_list *mc_list;
388 int i;
389 390
390 for (i = 0; i < net->mc_count; i++, mc_list = mc_list->next) { 391 netdev_for_each_mc_addr(mc_list, net) {
391 u32 crc = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26; 392 u32 crc = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
392 hashes[crc >> 3] |= 1 << (crc & 0x7); 393 hashes[crc >> 3] |= 1 << (crc & 0x7);
393 } 394 }
@@ -592,7 +593,7 @@ static void dm9601_status(struct usbnet *dev, struct urb *urb)
592 } 593 }
593 else 594 else
594 netif_carrier_off(dev->net); 595 netif_carrier_off(dev->net);
595 devdbg(dev, "Link Status is: %d", link); 596 netdev_dbg(dev->net, "Link Status is: %d\n", link);
596 } 597 }
597} 598}
598 599
@@ -603,8 +604,8 @@ static int dm9601_link_reset(struct usbnet *dev)
603 mii_check_media(&dev->mii, 1, 1); 604 mii_check_media(&dev->mii, 1, 1);
604 mii_ethtool_gset(&dev->mii, &ecmd); 605 mii_ethtool_gset(&dev->mii, &ecmd);
605 606
606 devdbg(dev, "link_reset() speed: %d duplex: %d", 607 netdev_dbg(dev->net, "link_reset() speed: %d duplex: %d\n",
607 ecmd.speed, ecmd.duplex); 608 ecmd.speed, ecmd.duplex);
608 609
609 return 0; 610 return 0;
610} 611}
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index 55cf7081de10..3c228df57062 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -51,7 +51,7 @@ static int int51x1_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
51 int len; 51 int len;
52 52
53 if (!(pskb_may_pull(skb, INT51X1_HEADER_SIZE))) { 53 if (!(pskb_may_pull(skb, INT51X1_HEADER_SIZE))) {
54 deverr(dev, "unexpected tiny rx frame"); 54 netdev_err(dev->net, "unexpected tiny rx frame\n");
55 return 0; 55 return 0;
56 } 56 }
57 57
@@ -138,25 +138,25 @@ static void int51x1_set_multicast(struct net_device *netdev)
138 if (netdev->flags & IFF_PROMISC) { 138 if (netdev->flags & IFF_PROMISC) {
139 /* do not expect to see traffic of other PLCs */ 139 /* do not expect to see traffic of other PLCs */
140 filter |= PACKET_TYPE_PROMISCUOUS; 140 filter |= PACKET_TYPE_PROMISCUOUS;
141 devinfo(dev, "promiscuous mode enabled"); 141 netdev_info(dev->net, "promiscuous mode enabled\n");
142 } else if (netdev->mc_count || 142 } else if (!netdev_mc_empty(netdev) ||
143 (netdev->flags & IFF_ALLMULTI)) { 143 (netdev->flags & IFF_ALLMULTI)) {
144 filter |= PACKET_TYPE_ALL_MULTICAST; 144 filter |= PACKET_TYPE_ALL_MULTICAST;
145 devdbg(dev, "receive all multicast enabled"); 145 netdev_dbg(dev->net, "receive all multicast enabled\n");
146 } else { 146 } else {
147 /* ~PROMISCUOUS, ~MULTICAST */ 147 /* ~PROMISCUOUS, ~MULTICAST */
148 devdbg(dev, "receive own packets only"); 148 netdev_dbg(dev->net, "receive own packets only\n");
149 } 149 }
150 150
151 urb = usb_alloc_urb(0, GFP_ATOMIC); 151 urb = usb_alloc_urb(0, GFP_ATOMIC);
152 if (!urb) { 152 if (!urb) {
153 devwarn(dev, "Error allocating URB"); 153 netdev_warn(dev->net, "Error allocating URB\n");
154 return; 154 return;
155 } 155 }
156 156
157 req = kmalloc(sizeof(*req), GFP_ATOMIC); 157 req = kmalloc(sizeof(*req), GFP_ATOMIC);
158 if (!req) { 158 if (!req) {
159 devwarn(dev, "Error allocating control msg"); 159 netdev_warn(dev->net, "Error allocating control msg\n");
160 goto out; 160 goto out;
161 } 161 }
162 162
@@ -173,7 +173,8 @@ static void int51x1_set_multicast(struct net_device *netdev)
173 173
174 status = usb_submit_urb(urb, GFP_ATOMIC); 174 status = usb_submit_urb(urb, GFP_ATOMIC);
175 if (status < 0) { 175 if (status < 0) {
176 devwarn(dev, "Error submitting control msg, sts=%d", status); 176 netdev_warn(dev->net, "Error submitting control msg, sts=%d\n",
177 status);
177 goto out1; 178 goto out1;
178 } 179 }
179 return; 180 return;
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index f1d64ef67efa..52671ea043a7 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -881,7 +881,7 @@ static void kaweth_set_rx_mode(struct net_device *net)
881 if (net->flags & IFF_PROMISC) { 881 if (net->flags & IFF_PROMISC) {
882 packet_filter_bitmap |= KAWETH_PACKET_FILTER_PROMISCUOUS; 882 packet_filter_bitmap |= KAWETH_PACKET_FILTER_PROMISCUOUS;
883 } 883 }
884 else if ((net->mc_count) || (net->flags & IFF_ALLMULTI)) { 884 else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) {
885 packet_filter_bitmap |= KAWETH_PACKET_FILTER_ALL_MULTICAST; 885 packet_filter_bitmap |= KAWETH_PACKET_FILTER_ALL_MULTICAST;
886 } 886 }
887 887
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 87374317f480..70978219e98a 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -1,13 +1,27 @@
1/* 1/*
2 * MosChips MCS7830 based USB 2.0 Ethernet Devices 2 * MOSCHIP MCS7830 based USB 2.0 Ethernet Devices
3 * 3 *
4 * based on usbnet.c, asix.c and the vendor provided mcs7830 driver 4 * based on usbnet.c, asix.c and the vendor provided mcs7830 driver
5 * 5 *
6 * Copyright (C) 2010 Andreas Mohr <andi@lisas.de>
6 * Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de> 7 * Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>
7 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com> 8 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
8 * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net> 9 * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
9 * Copyright (c) 2002-2003 TiVo Inc. 10 * Copyright (c) 2002-2003 TiVo Inc.
10 * 11 *
12 * Definitions gathered from MOSCHIP, Data Sheet_7830DA.pdf (thanks!).
13 *
14 * TODO:
15 * - support HIF_REG_CONFIG_SLEEPMODE/HIF_REG_CONFIG_TXENABLE (via autopm?)
16 * - implement ethtool_ops get_pauseparam/set_pauseparam
17 * via HIF_REG_PAUSE_THRESHOLD (>= revision C only!)
18 * - implement get_eeprom/[set_eeprom]
19 * - switch PHY on/off on ifup/ifdown (perhaps in usbnet.c, via MII)
20 * - mcs7830_get_regs() handling is weird: for rev 2 we return 32 regs,
21 * can access only ~ 24, remaining user buffer is uninitialized garbage
22 * - anything else?
23 *
24 *
11 * This program is free software; you can redistribute it and/or modify 25 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 26 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or 27 * the Free Software Foundation; either version 2 of the License, or
@@ -55,7 +69,7 @@
55 ADVERTISE_100HALF | ADVERTISE_10FULL | \ 69 ADVERTISE_100HALF | ADVERTISE_10FULL | \
56 ADVERTISE_10HALF | ADVERTISE_CSMA) 70 ADVERTISE_10HALF | ADVERTISE_CSMA)
57 71
58/* HIF_REG_XX coressponding index value */ 72/* HIF_REG_XX corresponding index value */
59enum { 73enum {
60 HIF_REG_MULTICAST_HASH = 0x00, 74 HIF_REG_MULTICAST_HASH = 0x00,
61 HIF_REG_PACKET_GAP1 = 0x08, 75 HIF_REG_PACKET_GAP1 = 0x08,
@@ -69,6 +83,7 @@ enum {
69 HIF_REG_PHY_CMD2_PEND_FLAG_BIT = 0x80, 83 HIF_REG_PHY_CMD2_PEND_FLAG_BIT = 0x80,
70 HIF_REG_PHY_CMD2_READY_FLAG_BIT = 0x40, 84 HIF_REG_PHY_CMD2_READY_FLAG_BIT = 0x40,
71 HIF_REG_CONFIG = 0x0e, 85 HIF_REG_CONFIG = 0x0e,
86 /* hmm, spec sez: "R/W", "Except bit 3" (likely TXENABLE). */
72 HIF_REG_CONFIG_CFG = 0x80, 87 HIF_REG_CONFIG_CFG = 0x80,
73 HIF_REG_CONFIG_SPEED100 = 0x40, 88 HIF_REG_CONFIG_SPEED100 = 0x40,
74 HIF_REG_CONFIG_FULLDUPLEX_ENABLE = 0x20, 89 HIF_REG_CONFIG_FULLDUPLEX_ENABLE = 0x20,
@@ -76,13 +91,24 @@ enum {
76 HIF_REG_CONFIG_TXENABLE = 0x08, 91 HIF_REG_CONFIG_TXENABLE = 0x08,
77 HIF_REG_CONFIG_SLEEPMODE = 0x04, 92 HIF_REG_CONFIG_SLEEPMODE = 0x04,
78 HIF_REG_CONFIG_ALLMULTICAST = 0x02, 93 HIF_REG_CONFIG_ALLMULTICAST = 0x02,
79 HIF_REG_CONFIG_PROMISCIOUS = 0x01, 94 HIF_REG_CONFIG_PROMISCUOUS = 0x01,
80 HIF_REG_ETHERNET_ADDR = 0x0f, 95 HIF_REG_ETHERNET_ADDR = 0x0f,
81 HIF_REG_22 = 0x15, 96 HIF_REG_FRAME_DROP_COUNTER = 0x15, /* 0..ff; reset: 0 */
82 HIF_REG_PAUSE_THRESHOLD = 0x16, 97 HIF_REG_PAUSE_THRESHOLD = 0x16,
83 HIF_REG_PAUSE_THRESHOLD_DEFAULT = 0, 98 HIF_REG_PAUSE_THRESHOLD_DEFAULT = 0,
84}; 99};
85 100
101/* Trailing status byte in Ethernet Rx frame */
102enum {
103 MCS7830_RX_SHORT_FRAME = 0x01, /* < 64 bytes */
104 MCS7830_RX_LENGTH_ERROR = 0x02, /* framelen != Ethernet length field */
105 MCS7830_RX_ALIGNMENT_ERROR = 0x04, /* non-even number of nibbles */
106 MCS7830_RX_CRC_ERROR = 0x08,
107 MCS7830_RX_LARGE_FRAME = 0x10, /* > 1518 bytes */
108 MCS7830_RX_FRAME_CORRECT = 0x20, /* frame is correct */
109 /* [7:6] reserved */
110};
111
86struct mcs7830_data { 112struct mcs7830_data {
87 u8 multi_filter[8]; 113 u8 multi_filter[8];
88 u8 config; 114 u8 config;
@@ -109,7 +135,7 @@ static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data)
109 return ret; 135 return ret;
110} 136}
111 137
112static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, void *data) 138static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *data)
113{ 139{
114 struct usb_device *xdev = dev->udev; 140 struct usb_device *xdev = dev->udev;
115 int ret; 141 int ret;
@@ -183,13 +209,43 @@ out:
183 usb_free_urb(urb); 209 usb_free_urb(urb);
184} 210}
185 211
186static int mcs7830_get_address(struct usbnet *dev) 212static int mcs7830_hif_get_mac_address(struct usbnet *dev, unsigned char *addr)
213{
214 int ret = mcs7830_get_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr);
215 if (ret < 0)
216 return ret;
217 return 0;
218}
219
220static int mcs7830_hif_set_mac_address(struct usbnet *dev, unsigned char *addr)
221{
222 int ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr);
223
224 if (ret < 0)
225 return ret;
226 return 0;
227}
228
229static int mcs7830_set_mac_address(struct net_device *netdev, void *p)
187{ 230{
188 int ret; 231 int ret;
189 ret = mcs7830_get_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, 232 struct usbnet *dev = netdev_priv(netdev);
190 dev->net->dev_addr); 233 struct sockaddr *addr = p;
234
235 if (netif_running(netdev))
236 return -EBUSY;
237
238 if (!is_valid_ether_addr(addr->sa_data))
239 return -EINVAL;
240
241 ret = mcs7830_hif_set_mac_address(dev, addr->sa_data);
242
191 if (ret < 0) 243 if (ret < 0)
192 return ret; 244 return ret;
245
246 /* it worked --> adopt it on netdev side */
247 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
248
193 return 0; 249 return 0;
194} 250}
195 251
@@ -307,7 +363,7 @@ static int mcs7830_get_rev(struct usbnet *dev)
307{ 363{
308 u8 dummy[2]; 364 u8 dummy[2];
309 int ret; 365 int ret;
310 ret = mcs7830_get_reg(dev, HIF_REG_22, 2, dummy); 366 ret = mcs7830_get_reg(dev, HIF_REG_FRAME_DROP_COUNTER, 2, dummy);
311 if (ret > 0) 367 if (ret > 0)
312 return 2; /* Rev C or later */ 368 return 2; /* Rev C or later */
313 return 1; /* earlier revision */ 369 return 1; /* earlier revision */
@@ -331,33 +387,6 @@ static void mcs7830_rev_C_fixup(struct usbnet *dev)
331 } 387 }
332} 388}
333 389
334static int mcs7830_init_dev(struct usbnet *dev)
335{
336 int ret;
337 int retry;
338
339 /* Read MAC address from EEPROM */
340 ret = -EINVAL;
341 for (retry = 0; retry < 5 && ret; retry++)
342 ret = mcs7830_get_address(dev);
343 if (ret) {
344 dev_warn(&dev->udev->dev, "Cannot read MAC address\n");
345 goto out;
346 }
347
348 /* Set up PHY */
349 ret = mcs7830_set_autoneg(dev, 0);
350 if (ret) {
351 dev_info(&dev->udev->dev, "Cannot set autoneg\n");
352 goto out;
353 }
354
355 mcs7830_rev_C_fixup(dev);
356 ret = 0;
357out:
358 return ret;
359}
360
361static int mcs7830_mdio_read(struct net_device *netdev, int phy_id, 390static int mcs7830_mdio_read(struct net_device *netdev, int phy_id,
362 int location) 391 int location)
363{ 392{
@@ -378,11 +407,33 @@ static int mcs7830_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
378 return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL); 407 return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
379} 408}
380 409
381/* credits go to asix_set_multicast */ 410static inline struct mcs7830_data *mcs7830_get_data(struct usbnet *dev)
382static void mcs7830_set_multicast(struct net_device *net) 411{
412 return (struct mcs7830_data *)&dev->data;
413}
414
415static void mcs7830_hif_update_multicast_hash(struct usbnet *dev)
416{
417 struct mcs7830_data *data = mcs7830_get_data(dev);
418 mcs7830_set_reg_async(dev, HIF_REG_MULTICAST_HASH,
419 sizeof data->multi_filter,
420 data->multi_filter);
421}
422
423static void mcs7830_hif_update_config(struct usbnet *dev)
424{
425 /* implementation specific to data->config
426 (argument needs to be heap-based anyway - USB DMA!) */
427 struct mcs7830_data *data = mcs7830_get_data(dev);
428 mcs7830_set_reg_async(dev, HIF_REG_CONFIG, 1, &data->config);
429}
430
431static void mcs7830_data_set_multicast(struct net_device *net)
383{ 432{
384 struct usbnet *dev = netdev_priv(net); 433 struct usbnet *dev = netdev_priv(net);
385 struct mcs7830_data *data = (struct mcs7830_data *)&dev->data; 434 struct mcs7830_data *data = mcs7830_get_data(dev);
435
436 memset(data->multi_filter, 0, sizeof data->multi_filter);
386 437
387 data->config = HIF_REG_CONFIG_TXENABLE; 438 data->config = HIF_REG_CONFIG_TXENABLE;
388 439
@@ -390,36 +441,64 @@ static void mcs7830_set_multicast(struct net_device *net)
390 data->config |= HIF_REG_CONFIG_ALLMULTICAST; 441 data->config |= HIF_REG_CONFIG_ALLMULTICAST;
391 442
392 if (net->flags & IFF_PROMISC) { 443 if (net->flags & IFF_PROMISC) {
393 data->config |= HIF_REG_CONFIG_PROMISCIOUS; 444 data->config |= HIF_REG_CONFIG_PROMISCUOUS;
394 } else if (net->flags & IFF_ALLMULTI || 445 } else if (net->flags & IFF_ALLMULTI ||
395 net->mc_count > MCS7830_MAX_MCAST) { 446 netdev_mc_count(net) > MCS7830_MAX_MCAST) {
396 data->config |= HIF_REG_CONFIG_ALLMULTICAST; 447 data->config |= HIF_REG_CONFIG_ALLMULTICAST;
397 } else if (net->mc_count == 0) { 448 } else if (netdev_mc_empty(net)) {
398 /* just broadcast and directed */ 449 /* just broadcast and directed */
399 } else { 450 } else {
400 /* We use the 20 byte dev->data 451 /* We use the 20 byte dev->data
401 * for our 8 byte filter buffer 452 * for our 8 byte filter buffer
402 * to avoid allocating memory that 453 * to avoid allocating memory that
403 * is tricky to free later */ 454 * is tricky to free later */
404 struct dev_mc_list *mc_list = net->mc_list; 455 struct dev_mc_list *mc_list;
405 u32 crc_bits; 456 u32 crc_bits;
406 int i;
407
408 memset(data->multi_filter, 0, sizeof data->multi_filter);
409 457
410 /* Build the multicast hash filter. */ 458 /* Build the multicast hash filter. */
411 for (i = 0; i < net->mc_count; i++) { 459 netdev_for_each_mc_addr(mc_list, net) {
412 crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26; 460 crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
413 data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7); 461 data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7);
414 mc_list = mc_list->next;
415 } 462 }
463 }
464}
416 465
417 mcs7830_set_reg_async(dev, HIF_REG_MULTICAST_HASH, 466static int mcs7830_apply_base_config(struct usbnet *dev)
418 sizeof data->multi_filter, 467{
419 data->multi_filter); 468 int ret;
469
470 /* re-configure known MAC (suspend case etc.) */
471 ret = mcs7830_hif_set_mac_address(dev, dev->net->dev_addr);
472 if (ret) {
473 dev_info(&dev->udev->dev, "Cannot set MAC address\n");
474 goto out;
420 } 475 }
421 476
422 mcs7830_set_reg_async(dev, HIF_REG_CONFIG, 1, &data->config); 477 /* Set up PHY */
478 ret = mcs7830_set_autoneg(dev, 0);
479 if (ret) {
480 dev_info(&dev->udev->dev, "Cannot set autoneg\n");
481 goto out;
482 }
483
484 mcs7830_hif_update_multicast_hash(dev);
485 mcs7830_hif_update_config(dev);
486
487 mcs7830_rev_C_fixup(dev);
488 ret = 0;
489out:
490 return ret;
491}
492
493/* credits go to asix_set_multicast */
494static void mcs7830_set_multicast(struct net_device *net)
495{
496 struct usbnet *dev = netdev_priv(net);
497
498 mcs7830_data_set_multicast(net);
499
500 mcs7830_hif_update_multicast_hash(dev);
501 mcs7830_hif_update_config(dev);
423} 502}
424 503
425static int mcs7830_get_regs_len(struct net_device *net) 504static int mcs7830_get_regs_len(struct net_device *net)
@@ -463,29 +542,6 @@ static const struct ethtool_ops mcs7830_ethtool_ops = {
463 .nway_reset = usbnet_nway_reset, 542 .nway_reset = usbnet_nway_reset,
464}; 543};
465 544
466static int mcs7830_set_mac_address(struct net_device *netdev, void *p)
467{
468 int ret;
469 struct usbnet *dev = netdev_priv(netdev);
470 struct sockaddr *addr = p;
471
472 if (netif_running(netdev))
473 return -EBUSY;
474
475 if (!is_valid_ether_addr(addr->sa_data))
476 return -EINVAL;
477
478 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
479
480 ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN,
481 netdev->dev_addr);
482
483 if (ret < 0)
484 return ret;
485
486 return 0;
487}
488
489static const struct net_device_ops mcs7830_netdev_ops = { 545static const struct net_device_ops mcs7830_netdev_ops = {
490 .ndo_open = usbnet_open, 546 .ndo_open = usbnet_open,
491 .ndo_stop = usbnet_stop, 547 .ndo_stop = usbnet_stop,
@@ -495,21 +551,32 @@ static const struct net_device_ops mcs7830_netdev_ops = {
495 .ndo_validate_addr = eth_validate_addr, 551 .ndo_validate_addr = eth_validate_addr,
496 .ndo_do_ioctl = mcs7830_ioctl, 552 .ndo_do_ioctl = mcs7830_ioctl,
497 .ndo_set_multicast_list = mcs7830_set_multicast, 553 .ndo_set_multicast_list = mcs7830_set_multicast,
498 .ndo_set_mac_address = mcs7830_set_mac_address, 554 .ndo_set_mac_address = mcs7830_set_mac_address,
499}; 555};
500 556
501static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev) 557static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev)
502{ 558{
503 struct net_device *net = dev->net; 559 struct net_device *net = dev->net;
504 int ret; 560 int ret;
561 int retry;
505 562
506 ret = mcs7830_init_dev(dev); 563 /* Initial startup: Gather MAC address setting from EEPROM */
564 ret = -EINVAL;
565 for (retry = 0; retry < 5 && ret; retry++)
566 ret = mcs7830_hif_get_mac_address(dev, net->dev_addr);
567 if (ret) {
568 dev_warn(&dev->udev->dev, "Cannot read MAC address\n");
569 goto out;
570 }
571
572 mcs7830_data_set_multicast(net);
573
574 ret = mcs7830_apply_base_config(dev);
507 if (ret) 575 if (ret)
508 goto out; 576 goto out;
509 577
510 net->ethtool_ops = &mcs7830_ethtool_ops; 578 net->ethtool_ops = &mcs7830_ethtool_ops;
511 net->netdev_ops = &mcs7830_netdev_ops; 579 net->netdev_ops = &mcs7830_netdev_ops;
512 mcs7830_set_multicast(net);
513 580
514 /* reserve space for the status byte on rx */ 581 /* reserve space for the status byte on rx */
515 dev->rx_urb_size = ETH_FRAME_LEN + 1; 582 dev->rx_urb_size = ETH_FRAME_LEN + 1;
@@ -526,7 +593,7 @@ out:
526 return ret; 593 return ret;
527} 594}
528 595
529/* The chip always appends a status bytes that we need to strip */ 596/* The chip always appends a status byte that we need to strip */
530static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 597static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
531{ 598{
532 u8 status; 599 u8 status;
@@ -539,9 +606,23 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
539 skb_trim(skb, skb->len - 1); 606 skb_trim(skb, skb->len - 1);
540 status = skb->data[skb->len]; 607 status = skb->data[skb->len];
541 608
542 if (status != 0x20) 609 if (status != MCS7830_RX_FRAME_CORRECT) {
543 dev_dbg(&dev->udev->dev, "rx fixup status %x\n", status); 610 dev_dbg(&dev->udev->dev, "rx fixup status %x\n", status);
544 611
612 /* hmm, perhaps usbnet.c already sees a globally visible
613 frame error and increments rx_errors on its own already? */
614 dev->net->stats.rx_errors++;
615
616 if (status & (MCS7830_RX_SHORT_FRAME
617 |MCS7830_RX_LENGTH_ERROR
618 |MCS7830_RX_LARGE_FRAME))
619 dev->net->stats.rx_length_errors++;
620 if (status & MCS7830_RX_ALIGNMENT_ERROR)
621 dev->net->stats.rx_frame_errors++;
622 if (status & MCS7830_RX_CRC_ERROR)
623 dev->net->stats.rx_crc_errors++;
624 }
625
545 return skb->len > 0; 626 return skb->len > 0;
546} 627}
547 628
@@ -580,6 +661,20 @@ static const struct usb_device_id products[] = {
580}; 661};
581MODULE_DEVICE_TABLE(usb, products); 662MODULE_DEVICE_TABLE(usb, products);
582 663
664static int mcs7830_reset_resume (struct usb_interface *intf)
665{
666 /* YES, this function is successful enough that ethtool -d
667 does show same output pre-/post-suspend */
668
669 struct usbnet *dev = usb_get_intfdata(intf);
670
671 mcs7830_apply_base_config(dev);
672
673 usbnet_resume(intf);
674
675 return 0;
676}
677
583static struct usb_driver mcs7830_driver = { 678static struct usb_driver mcs7830_driver = {
584 .name = driver_name, 679 .name = driver_name,
585 .id_table = products, 680 .id_table = products,
@@ -587,6 +682,7 @@ static struct usb_driver mcs7830_driver = {
587 .disconnect = usbnet_disconnect, 682 .disconnect = usbnet_disconnect,
588 .suspend = usbnet_suspend, 683 .suspend = usbnet_suspend,
589 .resume = usbnet_resume, 684 .resume = usbnet_resume,
685 .reset_resume = mcs7830_reset_resume,
590}; 686};
591 687
592static int __init mcs7830_init(void) 688static int __init mcs7830_init(void)
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index aeb1ab03a9ee..bdcad45954a3 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -203,25 +203,23 @@ static void nc_dump_registers(struct usbnet *dev)
203 203
204static inline void nc_dump_usbctl(struct usbnet *dev, u16 usbctl) 204static inline void nc_dump_usbctl(struct usbnet *dev, u16 usbctl)
205{ 205{
206 if (!netif_msg_link(dev)) 206 netif_dbg(dev, link, dev->net,
207 return; 207 "net1080 %s-%s usbctl 0x%x:%s%s%s%s%s; this%s%s; other%s%s; r/o 0x%x\n",
208 devdbg(dev, "net1080 %s-%s usbctl 0x%x:%s%s%s%s%s;" 208 dev->udev->bus->bus_name, dev->udev->devpath,
209 " this%s%s;" 209 usbctl,
210 " other%s%s; r/o 0x%x", 210 (usbctl & USBCTL_ENABLE_LANG) ? " lang" : "",
211 dev->udev->bus->bus_name, dev->udev->devpath, 211 (usbctl & USBCTL_ENABLE_MFGR) ? " mfgr" : "",
212 usbctl, 212 (usbctl & USBCTL_ENABLE_PROD) ? " prod" : "",
213 (usbctl & USBCTL_ENABLE_LANG) ? " lang" : "", 213 (usbctl & USBCTL_ENABLE_SERIAL) ? " serial" : "",
214 (usbctl & USBCTL_ENABLE_MFGR) ? " mfgr" : "", 214 (usbctl & USBCTL_ENABLE_DEFAULTS) ? " defaults" : "",
215 (usbctl & USBCTL_ENABLE_PROD) ? " prod" : "", 215
216 (usbctl & USBCTL_ENABLE_SERIAL) ? " serial" : "", 216 (usbctl & USBCTL_FLUSH_THIS) ? " FLUSH" : "",
217 (usbctl & USBCTL_ENABLE_DEFAULTS) ? " defaults" : "", 217 (usbctl & USBCTL_DISCONN_THIS) ? " DIS" : "",
218 218
219 (usbctl & USBCTL_FLUSH_OTHER) ? " FLUSH" : "", 219 (usbctl & USBCTL_FLUSH_OTHER) ? " FLUSH" : "",
220 (usbctl & USBCTL_DISCONN_OTHER) ? " DIS" : "", 220 (usbctl & USBCTL_DISCONN_OTHER) ? " DIS" : "",
221 (usbctl & USBCTL_FLUSH_THIS) ? " FLUSH" : "", 221
222 (usbctl & USBCTL_DISCONN_THIS) ? " DIS" : "", 222 usbctl & ~USBCTL_WRITABLE_MASK);
223 usbctl & ~USBCTL_WRITABLE_MASK
224 );
225} 223}
226 224
227/*-------------------------------------------------------------------------*/ 225/*-------------------------------------------------------------------------*/
@@ -248,30 +246,26 @@ static inline void nc_dump_usbctl(struct usbnet *dev, u16 usbctl)
248 246
249static inline void nc_dump_status(struct usbnet *dev, u16 status) 247static inline void nc_dump_status(struct usbnet *dev, u16 status)
250{ 248{
251 if (!netif_msg_link(dev)) 249 netif_dbg(dev, link, dev->net,
252 return; 250 "net1080 %s-%s status 0x%x: this (%c) PKT=%d%s%s%s; other PKT=%d%s%s%s; unspec 0x%x\n",
253 devdbg(dev, "net1080 %s-%s status 0x%x:" 251 dev->udev->bus->bus_name, dev->udev->devpath,
254 " this (%c) PKT=%d%s%s%s;" 252 status,
255 " other PKT=%d%s%s%s; unspec 0x%x", 253
256 dev->udev->bus->bus_name, dev->udev->devpath, 254 // XXX the packet counts don't seem right
257 status, 255 // (1 at reset, not 0); maybe UNSPEC too
258 256
259 // XXX the packet counts don't seem right 257 (status & STATUS_PORT_A) ? 'A' : 'B',
260 // (1 at reset, not 0); maybe UNSPEC too 258 STATUS_PACKETS_THIS(status),
261 259 (status & STATUS_CONN_THIS) ? " CON" : "",
262 (status & STATUS_PORT_A) ? 'A' : 'B', 260 (status & STATUS_SUSPEND_THIS) ? " SUS" : "",
263 STATUS_PACKETS_THIS(status), 261 (status & STATUS_MAILBOX_THIS) ? " MBOX" : "",
264 (status & STATUS_CONN_THIS) ? " CON" : "", 262
265 (status & STATUS_SUSPEND_THIS) ? " SUS" : "", 263 STATUS_PACKETS_OTHER(status),
266 (status & STATUS_MAILBOX_THIS) ? " MBOX" : "", 264 (status & STATUS_CONN_OTHER) ? " CON" : "",
267 265 (status & STATUS_SUSPEND_OTHER) ? " SUS" : "",
268 STATUS_PACKETS_OTHER(status), 266 (status & STATUS_MAILBOX_OTHER) ? " MBOX" : "",
269 (status & STATUS_CONN_OTHER) ? " CON" : "", 267
270 (status & STATUS_SUSPEND_OTHER) ? " SUS" : "", 268 status & STATUS_UNSPEC_MASK);
271 (status & STATUS_MAILBOX_OTHER) ? " MBOX" : "",
272
273 status & STATUS_UNSPEC_MASK
274 );
275} 269}
276 270
277/*-------------------------------------------------------------------------*/ 271/*-------------------------------------------------------------------------*/
@@ -286,10 +280,9 @@ static inline void nc_dump_status(struct usbnet *dev, u16 status)
286 280
287static inline void nc_dump_ttl(struct usbnet *dev, u16 ttl) 281static inline void nc_dump_ttl(struct usbnet *dev, u16 ttl)
288{ 282{
289 if (netif_msg_link(dev)) 283 netif_dbg(dev, link, dev->net, "net1080 %s-%s ttl 0x%x this = %d, other = %d\n",
290 devdbg(dev, "net1080 %s-%s ttl 0x%x this = %d, other = %d", 284 dev->udev->bus->bus_name, dev->udev->devpath,
291 dev->udev->bus->bus_name, dev->udev->devpath, 285 ttl, TTL_THIS(ttl), TTL_OTHER(ttl));
292 ttl, TTL_THIS(ttl), TTL_OTHER(ttl));
293} 286}
294 287
295/*-------------------------------------------------------------------------*/ 288/*-------------------------------------------------------------------------*/
@@ -334,11 +327,9 @@ static int net1080_reset(struct usbnet *dev)
334 MK_TTL(NC_READ_TTL_MS, TTL_OTHER(ttl)) ); 327 MK_TTL(NC_READ_TTL_MS, TTL_OTHER(ttl)) );
335 dbg("%s: assigned TTL, %d ms", dev->net->name, NC_READ_TTL_MS); 328 dbg("%s: assigned TTL, %d ms", dev->net->name, NC_READ_TTL_MS);
336 329
337 if (netif_msg_link(dev)) 330 netif_info(dev, link, dev->net, "port %c, peer %sconnected\n",
338 devinfo(dev, "port %c, peer %sconnected", 331 (status & STATUS_PORT_A) ? 'A' : 'B',
339 (status & STATUS_PORT_A) ? 'A' : 'B', 332 (status & STATUS_CONN_OTHER) ? "" : "dis");
340 (status & STATUS_CONN_OTHER) ? "" : "dis"
341 );
342 retval = 0; 333 retval = 0;
343 334
344done: 335done:
@@ -415,8 +406,8 @@ static void nc_ensure_sync(struct usbnet *dev)
415 return; 406 return;
416 } 407 }
417 408
418 if (netif_msg_rx_err(dev)) 409 netif_dbg(dev, rx_err, dev->net,
419 devdbg(dev, "flush net1080; too many framing errors"); 410 "flush net1080; too many framing errors\n");
420 dev->frame_errors = 0; 411 dev->frame_errors = 0;
421 } 412 }
422} 413}
@@ -486,8 +477,8 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
486 return 0; 477 return 0;
487 } 478 }
488#if 0 479#if 0
489 devdbg(dev, "frame <rx h %d p %d id %d", header->hdr_len, 480 netdev_dbg(dev->net, "frame <rx h %d p %d id %d\n", header->hdr_len,
490 header->packet_len, header->packet_id); 481 header->packet_len, header->packet_id);
491#endif 482#endif
492 dev->frame_errors = 0; 483 dev->frame_errors = 0;
493 return 1; 484 return 1;
@@ -547,9 +538,9 @@ encapsulate:
547 trailer = (struct nc_trailer *) skb_put(skb, sizeof *trailer); 538 trailer = (struct nc_trailer *) skb_put(skb, sizeof *trailer);
548 put_unaligned(header->packet_id, &trailer->packet_id); 539 put_unaligned(header->packet_id, &trailer->packet_id);
549#if 0 540#if 0
550 devdbg(dev, "frame >tx h %d p %d id %d", 541 netdev_dbg(dev->net, "frame >tx h %d p %d id %d\n",
551 header->hdr_len, header->packet_len, 542 header->hdr_len, header->packet_len,
552 header->packet_id); 543 header->packet_id);
553#endif 544#endif
554 return skb; 545 return skb;
555} 546}
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index ed4a508ef262..41838773b568 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -132,9 +132,10 @@ static void ctrl_callback(struct urb *urb)
132 case -ENOENT: 132 case -ENOENT:
133 break; 133 break;
134 default: 134 default:
135 if (netif_msg_drv(pegasus) && printk_ratelimit()) 135 if (net_ratelimit())
136 dev_dbg(&pegasus->intf->dev, "%s, status %d\n", 136 netif_dbg(pegasus, drv, pegasus->net,
137 __func__, status); 137 "%s, status %d\n", __func__, status);
138 break;
138 } 139 }
139 pegasus->flags &= ~ETH_REGS_CHANGED; 140 pegasus->flags &= ~ETH_REGS_CHANGED;
140 wake_up(&pegasus->ctrl_wait); 141 wake_up(&pegasus->ctrl_wait);
@@ -149,9 +150,8 @@ static int get_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
149 150
150 buffer = kmalloc(size, GFP_KERNEL); 151 buffer = kmalloc(size, GFP_KERNEL);
151 if (!buffer) { 152 if (!buffer) {
152 if (netif_msg_drv(pegasus)) 153 netif_warn(pegasus, drv, pegasus->net,
153 dev_warn(&pegasus->intf->dev, "out of memory in %s\n", 154 "out of memory in %s\n", __func__);
154 __func__);
155 return -ENOMEM; 155 return -ENOMEM;
156 } 156 }
157 add_wait_queue(&pegasus->ctrl_wait, &wait); 157 add_wait_queue(&pegasus->ctrl_wait, &wait);
@@ -181,9 +181,9 @@ static int get_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
181 set_current_state(TASK_RUNNING); 181 set_current_state(TASK_RUNNING);
182 if (ret == -ENODEV) 182 if (ret == -ENODEV)
183 netif_device_detach(pegasus->net); 183 netif_device_detach(pegasus->net);
184 if (netif_msg_drv(pegasus) && printk_ratelimit()) 184 if (net_ratelimit())
185 dev_err(&pegasus->intf->dev, "%s, status %d\n", 185 netif_err(pegasus, drv, pegasus->net,
186 __func__, ret); 186 "%s, status %d\n", __func__, ret);
187 goto out; 187 goto out;
188 } 188 }
189 189
@@ -205,9 +205,8 @@ static int set_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
205 205
206 buffer = kmalloc(size, GFP_KERNEL); 206 buffer = kmalloc(size, GFP_KERNEL);
207 if (!buffer) { 207 if (!buffer) {
208 if (netif_msg_drv(pegasus)) 208 netif_warn(pegasus, drv, pegasus->net,
209 dev_warn(&pegasus->intf->dev, "out of memory in %s\n", 209 "out of memory in %s\n", __func__);
210 __func__);
211 return -ENOMEM; 210 return -ENOMEM;
212 } 211 }
213 memcpy(buffer, data, size); 212 memcpy(buffer, data, size);
@@ -237,9 +236,8 @@ static int set_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
237 if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) { 236 if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
238 if (ret == -ENODEV) 237 if (ret == -ENODEV)
239 netif_device_detach(pegasus->net); 238 netif_device_detach(pegasus->net);
240 if (netif_msg_drv(pegasus)) 239 netif_err(pegasus, drv, pegasus->net,
241 dev_err(&pegasus->intf->dev, "%s, status %d\n", 240 "%s, status %d\n", __func__, ret);
242 __func__, ret);
243 goto out; 241 goto out;
244 } 242 }
245 243
@@ -259,9 +257,8 @@ static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data)
259 257
260 tmp = kmalloc(1, GFP_KERNEL); 258 tmp = kmalloc(1, GFP_KERNEL);
261 if (!tmp) { 259 if (!tmp) {
262 if (netif_msg_drv(pegasus)) 260 netif_warn(pegasus, drv, pegasus->net,
263 dev_warn(&pegasus->intf->dev, "out of memory in %s\n", 261 "out of memory in %s\n", __func__);
264 __func__);
265 return -ENOMEM; 262 return -ENOMEM;
266 } 263 }
267 memcpy(tmp, &data, 1); 264 memcpy(tmp, &data, 1);
@@ -290,9 +287,9 @@ static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data)
290 if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) { 287 if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
291 if (ret == -ENODEV) 288 if (ret == -ENODEV)
292 netif_device_detach(pegasus->net); 289 netif_device_detach(pegasus->net);
293 if (netif_msg_drv(pegasus) && printk_ratelimit()) 290 if (net_ratelimit())
294 dev_err(&pegasus->intf->dev, "%s, status %d\n", 291 netif_err(pegasus, drv, pegasus->net,
295 __func__, ret); 292 "%s, status %d\n", __func__, ret);
296 goto out; 293 goto out;
297 } 294 }
298 295
@@ -323,9 +320,8 @@ static int update_eth_regs_async(pegasus_t * pegasus)
323 if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) { 320 if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
324 if (ret == -ENODEV) 321 if (ret == -ENODEV)
325 netif_device_detach(pegasus->net); 322 netif_device_detach(pegasus->net);
326 if (netif_msg_drv(pegasus)) 323 netif_err(pegasus, drv, pegasus->net,
327 dev_err(&pegasus->intf->dev, "%s, status %d\n", 324 "%s, status %d\n", __func__, ret);
328 __func__, ret);
329 } 325 }
330 326
331 return ret; 327 return ret;
@@ -349,14 +345,16 @@ static int read_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 * regd)
349 if (data[0] & PHY_DONE) 345 if (data[0] & PHY_DONE)
350 break; 346 break;
351 } 347 }
352 if (i < REG_TIMEOUT) { 348
353 ret = get_registers(pegasus, PhyData, 2, &regdi); 349 if (i >= REG_TIMEOUT)
354 *regd = le16_to_cpu(regdi); 350 goto fail;
355 return ret; 351
356 } 352 ret = get_registers(pegasus, PhyData, 2, &regdi);
353 *regd = le16_to_cpu(regdi);
354 return ret;
355
357fail: 356fail:
358 if (netif_msg_drv(pegasus)) 357 netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
359 dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
360 358
361 return ret; 359 return ret;
362} 360}
@@ -388,12 +386,14 @@ static int write_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 regd)
388 if (data[0] & PHY_DONE) 386 if (data[0] & PHY_DONE)
389 break; 387 break;
390 } 388 }
391 if (i < REG_TIMEOUT) 389
392 return ret; 390 if (i >= REG_TIMEOUT)
391 goto fail;
392
393 return ret;
393 394
394fail: 395fail:
395 if (netif_msg_drv(pegasus)) 396 netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
396 dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
397 return -ETIMEDOUT; 397 return -ETIMEDOUT;
398} 398}
399 399
@@ -422,15 +422,15 @@ static int read_eprom_word(pegasus_t * pegasus, __u8 index, __u16 * retdata)
422 if (ret == -ESHUTDOWN) 422 if (ret == -ESHUTDOWN)
423 goto fail; 423 goto fail;
424 } 424 }
425 if (i < REG_TIMEOUT) { 425 if (i >= REG_TIMEOUT)
426 ret = get_registers(pegasus, EpromData, 2, &retdatai); 426 goto fail;
427 *retdata = le16_to_cpu(retdatai); 427
428 return ret; 428 ret = get_registers(pegasus, EpromData, 2, &retdatai);
429 } 429 *retdata = le16_to_cpu(retdatai);
430 return ret;
430 431
431fail: 432fail:
432 if (netif_msg_drv(pegasus)) 433 netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
433 dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
434 return -ETIMEDOUT; 434 return -ETIMEDOUT;
435} 435}
436 436
@@ -475,11 +475,13 @@ static int write_eprom_word(pegasus_t * pegasus, __u8 index, __u16 data)
475 break; 475 break;
476 } 476 }
477 disable_eprom_write(pegasus); 477 disable_eprom_write(pegasus);
478 if (i < REG_TIMEOUT) 478 if (i >= REG_TIMEOUT)
479 return ret; 479 goto fail;
480
481 return ret;
482
480fail: 483fail:
481 if (netif_msg_drv(pegasus)) 484 netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
482 dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
483 return -ETIMEDOUT; 485 return -ETIMEDOUT;
484} 486}
485#endif /* PEGASUS_WRITE_EEPROM */ 487#endif /* PEGASUS_WRITE_EEPROM */
@@ -642,25 +644,20 @@ static void read_bulk_callback(struct urb *urb)
642 case 0: 644 case 0:
643 break; 645 break;
644 case -ETIME: 646 case -ETIME:
645 if (netif_msg_rx_err(pegasus)) 647 netif_dbg(pegasus, rx_err, net, "reset MAC\n");
646 pr_debug("%s: reset MAC\n", net->name);
647 pegasus->flags &= ~PEGASUS_RX_BUSY; 648 pegasus->flags &= ~PEGASUS_RX_BUSY;
648 break; 649 break;
649 case -EPIPE: /* stall, or disconnect from TT */ 650 case -EPIPE: /* stall, or disconnect from TT */
650 /* FIXME schedule work to clear the halt */ 651 /* FIXME schedule work to clear the halt */
651 if (netif_msg_rx_err(pegasus)) 652 netif_warn(pegasus, rx_err, net, "no rx stall recovery\n");
652 printk(KERN_WARNING "%s: no rx stall recovery\n",
653 net->name);
654 return; 653 return;
655 case -ENOENT: 654 case -ENOENT:
656 case -ECONNRESET: 655 case -ECONNRESET:
657 case -ESHUTDOWN: 656 case -ESHUTDOWN:
658 if (netif_msg_ifdown(pegasus)) 657 netif_dbg(pegasus, ifdown, net, "rx unlink, %d\n", status);
659 pr_debug("%s: rx unlink, %d\n", net->name, status);
660 return; 658 return;
661 default: 659 default:
662 if (netif_msg_rx_err(pegasus)) 660 netif_dbg(pegasus, rx_err, net, "RX status %d\n", status);
663 pr_debug("%s: RX status %d\n", net->name, status);
664 goto goon; 661 goto goon;
665 } 662 }
666 663
@@ -669,9 +666,8 @@ static void read_bulk_callback(struct urb *urb)
669 666
670 rx_status = buf[count - 2]; 667 rx_status = buf[count - 2];
671 if (rx_status & 0x1e) { 668 if (rx_status & 0x1e) {
672 if (netif_msg_rx_err(pegasus)) 669 netif_dbg(pegasus, rx_err, net,
673 pr_debug("%s: RX packet error %x\n", 670 "RX packet error %x\n", rx_status);
674 net->name, rx_status);
675 pegasus->stats.rx_errors++; 671 pegasus->stats.rx_errors++;
676 if (rx_status & 0x06) // long or runt 672 if (rx_status & 0x06) // long or runt
677 pegasus->stats.rx_length_errors++; 673 pegasus->stats.rx_length_errors++;
@@ -758,9 +754,7 @@ static void rx_fixup(unsigned long data)
758 pegasus->rx_skb = pull_skb(pegasus); 754 pegasus->rx_skb = pull_skb(pegasus);
759 } 755 }
760 if (pegasus->rx_skb == NULL) { 756 if (pegasus->rx_skb == NULL) {
761 if (netif_msg_rx_err(pegasus)) 757 netif_warn(pegasus, rx_err, pegasus->net, "low on memory\n");
762 printk(KERN_WARNING "%s: low on memory\n",
763 pegasus->net->name);
764 tasklet_schedule(&pegasus->rx_tl); 758 tasklet_schedule(&pegasus->rx_tl);
765 goto done; 759 goto done;
766 } 760 }
@@ -800,19 +794,15 @@ static void write_bulk_callback(struct urb *urb)
800 case -EPIPE: 794 case -EPIPE:
801 /* FIXME schedule_work() to clear the tx halt */ 795 /* FIXME schedule_work() to clear the tx halt */
802 netif_stop_queue(net); 796 netif_stop_queue(net);
803 if (netif_msg_tx_err(pegasus)) 797 netif_warn(pegasus, tx_err, net, "no tx stall recovery\n");
804 printk(KERN_WARNING "%s: no tx stall recovery\n",
805 net->name);
806 return; 798 return;
807 case -ENOENT: 799 case -ENOENT:
808 case -ECONNRESET: 800 case -ECONNRESET:
809 case -ESHUTDOWN: 801 case -ESHUTDOWN:
810 if (netif_msg_ifdown(pegasus)) 802 netif_dbg(pegasus, ifdown, net, "tx unlink, %d\n", status);
811 pr_debug("%s: tx unlink, %d\n", net->name, status);
812 return; 803 return;
813 default: 804 default:
814 if (netif_msg_tx_err(pegasus)) 805 netif_info(pegasus, tx_err, net, "TX status %d\n", status);
815 pr_info("%s: TX status %d\n", net->name, status);
816 /* FALL THROUGH */ 806 /* FALL THROUGH */
817 case 0: 807 case 0:
818 break; 808 break;
@@ -843,9 +833,7 @@ static void intr_callback(struct urb *urb)
843 /* some Pegasus-I products report LOTS of data 833 /* some Pegasus-I products report LOTS of data
844 * toggle errors... avoid log spamming 834 * toggle errors... avoid log spamming
845 */ 835 */
846 if (netif_msg_timer(pegasus)) 836 netif_dbg(pegasus, timer, net, "intr status %d\n", status);
847 pr_debug("%s: intr status %d\n", net->name,
848 status);
849 } 837 }
850 838
851 if (urb->actual_length >= 6) { 839 if (urb->actual_length >= 6) {
@@ -875,16 +863,15 @@ static void intr_callback(struct urb *urb)
875 res = usb_submit_urb(urb, GFP_ATOMIC); 863 res = usb_submit_urb(urb, GFP_ATOMIC);
876 if (res == -ENODEV) 864 if (res == -ENODEV)
877 netif_device_detach(pegasus->net); 865 netif_device_detach(pegasus->net);
878 if (res && netif_msg_timer(pegasus)) 866 if (res)
879 printk(KERN_ERR "%s: can't resubmit interrupt urb, %d\n", 867 netif_err(pegasus, timer, net,
880 net->name, res); 868 "can't resubmit interrupt urb, %d\n", res);
881} 869}
882 870
883static void pegasus_tx_timeout(struct net_device *net) 871static void pegasus_tx_timeout(struct net_device *net)
884{ 872{
885 pegasus_t *pegasus = netdev_priv(net); 873 pegasus_t *pegasus = netdev_priv(net);
886 if (netif_msg_timer(pegasus)) 874 netif_warn(pegasus, timer, net, "tx timeout\n");
887 printk(KERN_WARNING "%s: tx timeout\n", net->name);
888 usb_unlink_urb(pegasus->tx_urb); 875 usb_unlink_urb(pegasus->tx_urb);
889 pegasus->stats.tx_errors++; 876 pegasus->stats.tx_errors++;
890} 877}
@@ -906,9 +893,7 @@ static netdev_tx_t pegasus_start_xmit(struct sk_buff *skb,
906 pegasus->tx_buff, count, 893 pegasus->tx_buff, count,
907 write_bulk_callback, pegasus); 894 write_bulk_callback, pegasus);
908 if ((res = usb_submit_urb(pegasus->tx_urb, GFP_ATOMIC))) { 895 if ((res = usb_submit_urb(pegasus->tx_urb, GFP_ATOMIC))) {
909 if (netif_msg_tx_err(pegasus)) 896 netif_warn(pegasus, tx_err, net, "fail tx, %d\n", res);
910 printk(KERN_WARNING "%s: fail tx, %d\n",
911 net->name, res);
912 switch (res) { 897 switch (res) {
913 case -EPIPE: /* stall, or disconnect from TT */ 898 case -EPIPE: /* stall, or disconnect from TT */
914 /* cleanup should already have been scheduled */ 899 /* cleanup should already have been scheduled */
@@ -952,10 +937,9 @@ static inline void get_interrupt_interval(pegasus_t * pegasus)
952 interval = data >> 8; 937 interval = data >> 8;
953 if (pegasus->usb->speed != USB_SPEED_HIGH) { 938 if (pegasus->usb->speed != USB_SPEED_HIGH) {
954 if (interval < 0x80) { 939 if (interval < 0x80) {
955 if (netif_msg_timer(pegasus)) 940 netif_info(pegasus, timer, pegasus->net,
956 dev_info(&pegasus->intf->dev, "intr interval " 941 "intr interval changed from %ums to %ums\n",
957 "changed from %ums to %ums\n", 942 interval, 0x80);
958 interval, 0x80);
959 interval = 0x80; 943 interval = 0x80;
960 data = (data & 0x00FF) | ((u16)interval << 8); 944 data = (data & 0x00FF) | ((u16)interval << 8);
961#ifdef PEGASUS_WRITE_EEPROM 945#ifdef PEGASUS_WRITE_EEPROM
@@ -1046,8 +1030,7 @@ static int pegasus_open(struct net_device *net)
1046 if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) { 1030 if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) {
1047 if (res == -ENODEV) 1031 if (res == -ENODEV)
1048 netif_device_detach(pegasus->net); 1032 netif_device_detach(pegasus->net);
1049 if (netif_msg_ifup(pegasus)) 1033 netif_dbg(pegasus, ifup, net, "failed rx_urb, %d\n", res);
1050 pr_debug("%s: failed rx_urb, %d", net->name, res);
1051 goto exit; 1034 goto exit;
1052 } 1035 }
1053 1036
@@ -1058,15 +1041,13 @@ static int pegasus_open(struct net_device *net)
1058 if ((res = usb_submit_urb(pegasus->intr_urb, GFP_KERNEL))) { 1041 if ((res = usb_submit_urb(pegasus->intr_urb, GFP_KERNEL))) {
1059 if (res == -ENODEV) 1042 if (res == -ENODEV)
1060 netif_device_detach(pegasus->net); 1043 netif_device_detach(pegasus->net);
1061 if (netif_msg_ifup(pegasus)) 1044 netif_dbg(pegasus, ifup, net, "failed intr_urb, %d\n", res);
1062 pr_debug("%s: failed intr_urb, %d\n", net->name, res);
1063 usb_kill_urb(pegasus->rx_urb); 1045 usb_kill_urb(pegasus->rx_urb);
1064 goto exit; 1046 goto exit;
1065 } 1047 }
1066 if ((res = enable_net_traffic(net, pegasus->usb))) { 1048 if ((res = enable_net_traffic(net, pegasus->usb))) {
1067 if (netif_msg_ifup(pegasus)) 1049 netif_dbg(pegasus, ifup, net,
1068 pr_debug("%s: can't enable_net_traffic() - %d\n", 1050 "can't enable_net_traffic() - %d\n", res);
1069 net->name, res);
1070 res = -EIO; 1051 res = -EIO;
1071 usb_kill_urb(pegasus->rx_urb); 1052 usb_kill_urb(pegasus->rx_urb);
1072 usb_kill_urb(pegasus->intr_urb); 1053 usb_kill_urb(pegasus->intr_urb);
@@ -1075,8 +1056,7 @@ static int pegasus_open(struct net_device *net)
1075 } 1056 }
1076 set_carrier(net); 1057 set_carrier(net);
1077 netif_start_queue(net); 1058 netif_start_queue(net);
1078 if (netif_msg_ifup(pegasus)) 1059 netif_dbg(pegasus, ifup, net, "open\n");
1079 pr_debug("%s: open\n", net->name);
1080 res = 0; 1060 res = 0;
1081exit: 1061exit:
1082 return res; 1062 return res;
@@ -1230,13 +1210,11 @@ static void pegasus_set_multicast(struct net_device *net)
1230 1210
1231 if (net->flags & IFF_PROMISC) { 1211 if (net->flags & IFF_PROMISC) {
1232 pegasus->eth_regs[EthCtrl2] |= RX_PROMISCUOUS; 1212 pegasus->eth_regs[EthCtrl2] |= RX_PROMISCUOUS;
1233 if (netif_msg_link(pegasus)) 1213 netif_info(pegasus, link, net, "Promiscuous mode enabled\n");
1234 pr_info("%s: Promiscuous mode enabled.\n", net->name); 1214 } else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) {
1235 } else if (net->mc_count || (net->flags & IFF_ALLMULTI)) {
1236 pegasus->eth_regs[EthCtrl0] |= RX_MULTICAST; 1215 pegasus->eth_regs[EthCtrl0] |= RX_MULTICAST;
1237 pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS; 1216 pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
1238 if (netif_msg_link(pegasus)) 1217 netif_dbg(pegasus, link, net, "set allmulti\n");
1239 pr_debug("%s: set allmulti\n", net->name);
1240 } else { 1218 } else {
1241 pegasus->eth_regs[EthCtrl0] &= ~RX_MULTICAST; 1219 pegasus->eth_regs[EthCtrl0] &= ~RX_MULTICAST;
1242 pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS; 1220 pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 490fa8f55424..4ce331fb1e1e 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -57,8 +57,8 @@
57 */ 57 */
58void rndis_status(struct usbnet *dev, struct urb *urb) 58void rndis_status(struct usbnet *dev, struct urb *urb)
59{ 59{
60 devdbg(dev, "rndis status urb, len %d stat %d", 60 netdev_dbg(dev->net, "rndis status urb, len %d stat %d\n",
61 urb->actual_length, urb->status); 61 urb->actual_length, urb->status);
62 // FIXME for keepalives, respond immediately (asynchronously) 62 // FIXME for keepalives, respond immediately (asynchronously)
63 // if not an RNDIS status, do like cdc_status(dev,urb) does 63 // if not an RNDIS status, do like cdc_status(dev,urb) does
64} 64}
@@ -335,8 +335,8 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
335 335
336 dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1); 336 dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1);
337 if (dev->maxpacket == 0) { 337 if (dev->maxpacket == 0) {
338 if (netif_msg_probe(dev)) 338 netif_dbg(dev, probe, dev->net,
339 dev_dbg(&intf->dev, "dev->maxpacket can't be 0\n"); 339 "dev->maxpacket can't be 0\n");
340 retval = -EINVAL; 340 retval = -EINVAL;
341 goto fail_and_release; 341 goto fail_and_release;
342 } 342 }
@@ -394,17 +394,15 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
394 } 394 }
395 if ((flags & FLAG_RNDIS_PHYM_WIRELESS) && 395 if ((flags & FLAG_RNDIS_PHYM_WIRELESS) &&
396 *phym != RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) { 396 *phym != RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) {
397 if (netif_msg_probe(dev)) 397 netif_dbg(dev, probe, dev->net,
398 dev_dbg(&intf->dev, "driver requires wireless " 398 "driver requires wireless physical medium, but device is not\n");
399 "physical medium, but device is not.\n");
400 retval = -ENODEV; 399 retval = -ENODEV;
401 goto halt_fail_and_release; 400 goto halt_fail_and_release;
402 } 401 }
403 if ((flags & FLAG_RNDIS_PHYM_NOT_WIRELESS) && 402 if ((flags & FLAG_RNDIS_PHYM_NOT_WIRELESS) &&
404 *phym == RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) { 403 *phym == RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) {
405 if (netif_msg_probe(dev)) 404 netif_dbg(dev, probe, dev->net,
406 dev_dbg(&intf->dev, "driver requires non-wireless " 405 "driver requires non-wireless physical medium, but device is wireless.\n");
407 "physical medium, but device is wireless.\n");
408 retval = -ENODEV; 406 retval = -ENODEV;
409 goto halt_fail_and_release; 407 goto halt_fail_and_release;
410 } 408 }
@@ -497,9 +495,9 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
497 skb->len < msg_len || 495 skb->len < msg_len ||
498 (data_offset + data_len + 8) > msg_len)) { 496 (data_offset + data_len + 8) > msg_len)) {
499 dev->net->stats.rx_frame_errors++; 497 dev->net->stats.rx_frame_errors++;
500 devdbg(dev, "bad rndis message %d/%d/%d/%d, len %d", 498 netdev_dbg(dev->net, "bad rndis message %d/%d/%d/%d, len %d\n",
501 le32_to_cpu(hdr->msg_type), 499 le32_to_cpu(hdr->msg_type),
502 msg_len, data_offset, data_len, skb->len); 500 msg_len, data_offset, data_len, skb->len);
503 return 0; 501 return 0;
504 } 502 }
505 skb_pull(skb, 8 + data_offset); 503 skb_pull(skb, 8 + data_offset);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index fd19db0d2504..e85c89c6706d 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -313,20 +313,17 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
313{ 313{
314 struct sockaddr *addr = p; 314 struct sockaddr *addr = p;
315 rtl8150_t *dev = netdev_priv(netdev); 315 rtl8150_t *dev = netdev_priv(netdev);
316 int i;
317 316
318 if (netif_running(netdev)) 317 if (netif_running(netdev))
319 return -EBUSY; 318 return -EBUSY;
320 319
321 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 320 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
322 dbg("%s: Setting MAC address to ", netdev->name); 321 dbg("%s: Setting MAC address to %pM\n", netdev->name, netdev->dev_addr);
323 for (i = 0; i < 5; i++)
324 dbg("%02X:", netdev->dev_addr[i]);
325 dbg("%02X\n", netdev->dev_addr[i]);
326 /* Set the IDR registers. */ 322 /* Set the IDR registers. */
327 set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr); 323 set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
328#ifdef EEPROM_WRITE 324#ifdef EEPROM_WRITE
329 { 325 {
326 int i;
330 u8 cr; 327 u8 cr;
331 /* Get the CR contents. */ 328 /* Get the CR contents. */
332 get_registers(dev, CR, 1, &cr); 329 get_registers(dev, CR, 1, &cr);
@@ -714,7 +711,7 @@ static void rtl8150_set_multicast(struct net_device *netdev)
714 if (netdev->flags & IFF_PROMISC) { 711 if (netdev->flags & IFF_PROMISC) {
715 dev->rx_creg |= cpu_to_le16(0x0001); 712 dev->rx_creg |= cpu_to_le16(0x0001);
716 dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name); 713 dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name);
717 } else if (netdev->mc_count || 714 } else if (!netdev_mc_empty(netdev) ||
718 (netdev->flags & IFF_ALLMULTI)) { 715 (netdev->flags & IFF_ALLMULTI)) {
719 dev->rx_creg &= cpu_to_le16(0xfffe); 716 dev->rx_creg &= cpu_to_le16(0xfffe);
720 dev->rx_creg |= cpu_to_le16(0x0002); 717 dev->rx_creg |= cpu_to_le16(0x0002);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 0c3c738d7419..df9179a1c93b 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -78,7 +78,7 @@ static int smsc95xx_read_reg(struct usbnet *dev, u32 index, u32 *data)
78 00, index, buf, 4, USB_CTRL_GET_TIMEOUT); 78 00, index, buf, 4, USB_CTRL_GET_TIMEOUT);
79 79
80 if (unlikely(ret < 0)) 80 if (unlikely(ret < 0))
81 devwarn(dev, "Failed to read register index 0x%08x", index); 81 netdev_warn(dev->net, "Failed to read register index 0x%08x\n", index);
82 82
83 le32_to_cpus(buf); 83 le32_to_cpus(buf);
84 *data = *buf; 84 *data = *buf;
@@ -106,7 +106,7 @@ static int smsc95xx_write_reg(struct usbnet *dev, u32 index, u32 data)
106 00, index, buf, 4, USB_CTRL_SET_TIMEOUT); 106 00, index, buf, 4, USB_CTRL_SET_TIMEOUT);
107 107
108 if (unlikely(ret < 0)) 108 if (unlikely(ret < 0))
109 devwarn(dev, "Failed to write register index 0x%08x", index); 109 netdev_warn(dev->net, "Failed to write register index 0x%08x\n", index);
110 110
111 kfree(buf); 111 kfree(buf);
112 112
@@ -138,7 +138,7 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
138 138
139 /* confirm MII not busy */ 139 /* confirm MII not busy */
140 if (smsc95xx_phy_wait_not_busy(dev)) { 140 if (smsc95xx_phy_wait_not_busy(dev)) {
141 devwarn(dev, "MII is busy in smsc95xx_mdio_read"); 141 netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_read\n");
142 mutex_unlock(&dev->phy_mutex); 142 mutex_unlock(&dev->phy_mutex);
143 return -EIO; 143 return -EIO;
144 } 144 }
@@ -150,7 +150,7 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
150 smsc95xx_write_reg(dev, MII_ADDR, addr); 150 smsc95xx_write_reg(dev, MII_ADDR, addr);
151 151
152 if (smsc95xx_phy_wait_not_busy(dev)) { 152 if (smsc95xx_phy_wait_not_busy(dev)) {
153 devwarn(dev, "Timed out reading MII reg %02X", idx); 153 netdev_warn(dev->net, "Timed out reading MII reg %02X\n", idx);
154 mutex_unlock(&dev->phy_mutex); 154 mutex_unlock(&dev->phy_mutex);
155 return -EIO; 155 return -EIO;
156 } 156 }
@@ -172,7 +172,7 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
172 172
173 /* confirm MII not busy */ 173 /* confirm MII not busy */
174 if (smsc95xx_phy_wait_not_busy(dev)) { 174 if (smsc95xx_phy_wait_not_busy(dev)) {
175 devwarn(dev, "MII is busy in smsc95xx_mdio_write"); 175 netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_write\n");
176 mutex_unlock(&dev->phy_mutex); 176 mutex_unlock(&dev->phy_mutex);
177 return; 177 return;
178 } 178 }
@@ -187,7 +187,7 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
187 smsc95xx_write_reg(dev, MII_ADDR, addr); 187 smsc95xx_write_reg(dev, MII_ADDR, addr);
188 188
189 if (smsc95xx_phy_wait_not_busy(dev)) 189 if (smsc95xx_phy_wait_not_busy(dev))
190 devwarn(dev, "Timed out writing MII reg %02X", idx); 190 netdev_warn(dev->net, "Timed out writing MII reg %02X\n", idx);
191 191
192 mutex_unlock(&dev->phy_mutex); 192 mutex_unlock(&dev->phy_mutex);
193} 193}
@@ -205,7 +205,7 @@ static int smsc95xx_wait_eeprom(struct usbnet *dev)
205 } while (!time_after(jiffies, start_time + HZ)); 205 } while (!time_after(jiffies, start_time + HZ));
206 206
207 if (val & (E2P_CMD_TIMEOUT_ | E2P_CMD_BUSY_)) { 207 if (val & (E2P_CMD_TIMEOUT_ | E2P_CMD_BUSY_)) {
208 devwarn(dev, "EEPROM read operation timeout"); 208 netdev_warn(dev->net, "EEPROM read operation timeout\n");
209 return -EIO; 209 return -EIO;
210 } 210 }
211 211
@@ -226,7 +226,7 @@ static int smsc95xx_eeprom_confirm_not_busy(struct usbnet *dev)
226 udelay(40); 226 udelay(40);
227 } while (!time_after(jiffies, start_time + HZ)); 227 } while (!time_after(jiffies, start_time + HZ));
228 228
229 devwarn(dev, "EEPROM is busy"); 229 netdev_warn(dev->net, "EEPROM is busy\n");
230 return -EIO; 230 return -EIO;
231} 231}
232 232
@@ -308,7 +308,7 @@ static void smsc95xx_async_cmd_callback(struct urb *urb)
308 int status = urb->status; 308 int status = urb->status;
309 309
310 if (status < 0) 310 if (status < 0)
311 devwarn(dev, "async callback failed with %d", status); 311 netdev_warn(dev->net, "async callback failed with %d\n", status);
312 312
313 kfree(usb_context); 313 kfree(usb_context);
314 usb_free_urb(urb); 314 usb_free_urb(urb);
@@ -323,13 +323,13 @@ static int smsc95xx_write_reg_async(struct usbnet *dev, u16 index, u32 *data)
323 323
324 urb = usb_alloc_urb(0, GFP_ATOMIC); 324 urb = usb_alloc_urb(0, GFP_ATOMIC);
325 if (!urb) { 325 if (!urb) {
326 devwarn(dev, "Error allocating URB"); 326 netdev_warn(dev->net, "Error allocating URB\n");
327 return -ENOMEM; 327 return -ENOMEM;
328 } 328 }
329 329
330 usb_context = kmalloc(sizeof(struct usb_context), GFP_ATOMIC); 330 usb_context = kmalloc(sizeof(struct usb_context), GFP_ATOMIC);
331 if (usb_context == NULL) { 331 if (usb_context == NULL) {
332 devwarn(dev, "Error allocating control msg"); 332 netdev_warn(dev->net, "Error allocating control msg\n");
333 usb_free_urb(urb); 333 usb_free_urb(urb);
334 return -ENOMEM; 334 return -ENOMEM;
335 } 335 }
@@ -348,7 +348,8 @@ static int smsc95xx_write_reg_async(struct usbnet *dev, u16 index, u32 *data)
348 348
349 status = usb_submit_urb(urb, GFP_ATOMIC); 349 status = usb_submit_urb(urb, GFP_ATOMIC);
350 if (status < 0) { 350 if (status < 0) {
351 devwarn(dev, "Error submitting control msg, sts=%d", status); 351 netdev_warn(dev->net, "Error submitting control msg, sts=%d\n",
352 status);
352 kfree(usb_context); 353 kfree(usb_context);
353 usb_free_urb(urb); 354 usb_free_urb(urb);
354 } 355 }
@@ -375,46 +376,32 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
375 spin_lock_irqsave(&pdata->mac_cr_lock, flags); 376 spin_lock_irqsave(&pdata->mac_cr_lock, flags);
376 377
377 if (dev->net->flags & IFF_PROMISC) { 378 if (dev->net->flags & IFF_PROMISC) {
378 if (netif_msg_drv(dev)) 379 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled\n");
379 devdbg(dev, "promiscuous mode enabled");
380 pdata->mac_cr |= MAC_CR_PRMS_; 380 pdata->mac_cr |= MAC_CR_PRMS_;
381 pdata->mac_cr &= ~(MAC_CR_MCPAS_ | MAC_CR_HPFILT_); 381 pdata->mac_cr &= ~(MAC_CR_MCPAS_ | MAC_CR_HPFILT_);
382 } else if (dev->net->flags & IFF_ALLMULTI) { 382 } else if (dev->net->flags & IFF_ALLMULTI) {
383 if (netif_msg_drv(dev)) 383 netif_dbg(dev, drv, dev->net, "receive all multicast enabled\n");
384 devdbg(dev, "receive all multicast enabled");
385 pdata->mac_cr |= MAC_CR_MCPAS_; 384 pdata->mac_cr |= MAC_CR_MCPAS_;
386 pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_HPFILT_); 385 pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_HPFILT_);
387 } else if (dev->net->mc_count > 0) { 386 } else if (!netdev_mc_empty(dev->net)) {
388 struct dev_mc_list *mc_list = dev->net->mc_list; 387 struct dev_mc_list *mc_list;
389 int count = 0;
390 388
391 pdata->mac_cr |= MAC_CR_HPFILT_; 389 pdata->mac_cr |= MAC_CR_HPFILT_;
392 pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_); 390 pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
393 391
394 while (mc_list) { 392 netdev_for_each_mc_addr(mc_list, netdev) {
395 count++; 393 u32 bitnum = smsc95xx_hash(mc_list->dmi_addr);
396 if (mc_list->dmi_addrlen == ETH_ALEN) { 394 u32 mask = 0x01 << (bitnum & 0x1F);
397 u32 bitnum = smsc95xx_hash(mc_list->dmi_addr); 395 if (bitnum & 0x20)
398 u32 mask = 0x01 << (bitnum & 0x1F); 396 hash_hi |= mask;
399 if (bitnum & 0x20) 397 else
400 hash_hi |= mask; 398 hash_lo |= mask;
401 else
402 hash_lo |= mask;
403 } else {
404 devwarn(dev, "dmi_addrlen != 6");
405 }
406 mc_list = mc_list->next;
407 } 399 }
408 400
409 if (count != ((u32)dev->net->mc_count)) 401 netif_dbg(dev, drv, dev->net, "HASHH=0x%08X, HASHL=0x%08X\n",
410 devwarn(dev, "mc_count != dev->mc_count"); 402 hash_hi, hash_lo);
411
412 if (netif_msg_drv(dev))
413 devdbg(dev, "HASHH=0x%08X, HASHL=0x%08X", hash_hi,
414 hash_lo);
415 } else { 403 } else {
416 if (netif_msg_drv(dev)) 404 netif_dbg(dev, drv, dev->net, "receive own packets only\n");
417 devdbg(dev, "receive own packets only");
418 pdata->mac_cr &= 405 pdata->mac_cr &=
419 ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_ | MAC_CR_HPFILT_); 406 ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_ | MAC_CR_HPFILT_);
420 } 407 }
@@ -434,7 +421,7 @@ static void smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
434 421
435 int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg); 422 int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg);
436 if (ret < 0) { 423 if (ret < 0) {
437 devwarn(dev, "error reading AFC_CFG"); 424 netdev_warn(dev->net, "error reading AFC_CFG\n");
438 return; 425 return;
439 } 426 }
440 427
@@ -451,13 +438,11 @@ static void smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
451 else 438 else
452 afc_cfg &= ~0xF; 439 afc_cfg &= ~0xF;
453 440
454 if (netif_msg_link(dev)) 441 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s\n",
455 devdbg(dev, "rx pause %s, tx pause %s", 442 cap & FLOW_CTRL_RX ? "enabled" : "disabled",
456 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), 443 cap & FLOW_CTRL_TX ? "enabled" : "disabled");
457 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
458 } else { 444 } else {
459 if (netif_msg_link(dev)) 445 netif_dbg(dev, link, dev->net, "half duplex\n");
460 devdbg(dev, "half duplex");
461 flow = 0; 446 flow = 0;
462 afc_cfg |= 0xF; 447 afc_cfg |= 0xF;
463 } 448 }
@@ -485,9 +470,8 @@ static int smsc95xx_link_reset(struct usbnet *dev)
485 lcladv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE); 470 lcladv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
486 rmtadv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_LPA); 471 rmtadv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
487 472
488 if (netif_msg_link(dev)) 473 netif_dbg(dev, link, dev->net, "speed: %d duplex: %d lcladv: %04x rmtadv: %04x\n",
489 devdbg(dev, "speed: %d duplex: %d lcladv: %04x rmtadv: %04x", 474 ecmd.speed, ecmd.duplex, lcladv, rmtadv);
490 ecmd.speed, ecmd.duplex, lcladv, rmtadv);
491 475
492 spin_lock_irqsave(&pdata->mac_cr_lock, flags); 476 spin_lock_irqsave(&pdata->mac_cr_lock, flags);
493 if (ecmd.duplex != DUPLEX_FULL) { 477 if (ecmd.duplex != DUPLEX_FULL) {
@@ -511,20 +495,21 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
511 u32 intdata; 495 u32 intdata;
512 496
513 if (urb->actual_length != 4) { 497 if (urb->actual_length != 4) {
514 devwarn(dev, "unexpected urb length %d", urb->actual_length); 498 netdev_warn(dev->net, "unexpected urb length %d\n",
499 urb->actual_length);
515 return; 500 return;
516 } 501 }
517 502
518 memcpy(&intdata, urb->transfer_buffer, 4); 503 memcpy(&intdata, urb->transfer_buffer, 4);
519 le32_to_cpus(&intdata); 504 le32_to_cpus(&intdata);
520 505
521 if (netif_msg_link(dev)) 506 netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata);
522 devdbg(dev, "intdata: 0x%08X", intdata);
523 507
524 if (intdata & INT_ENP_PHY_INT_) 508 if (intdata & INT_ENP_PHY_INT_)
525 usbnet_defer_kevent(dev, EVENT_LINK_RESET); 509 usbnet_defer_kevent(dev, EVENT_LINK_RESET);
526 else 510 else
527 devwarn(dev, "unexpected interrupt, intdata=0x%08X", intdata); 511 netdev_warn(dev->net, "unexpected interrupt, intdata=0x%08X\n",
512 intdata);
528} 513}
529 514
530/* Enable or disable Tx & Rx checksum offload engines */ 515/* Enable or disable Tx & Rx checksum offload engines */
@@ -534,7 +519,7 @@ static int smsc95xx_set_csums(struct usbnet *dev)
534 u32 read_buf; 519 u32 read_buf;
535 int ret = smsc95xx_read_reg(dev, COE_CR, &read_buf); 520 int ret = smsc95xx_read_reg(dev, COE_CR, &read_buf);
536 if (ret < 0) { 521 if (ret < 0) {
537 devwarn(dev, "Failed to read COE_CR: %d", ret); 522 netdev_warn(dev->net, "Failed to read COE_CR: %d\n", ret);
538 return ret; 523 return ret;
539 } 524 }
540 525
@@ -550,12 +535,11 @@ static int smsc95xx_set_csums(struct usbnet *dev)
550 535
551 ret = smsc95xx_write_reg(dev, COE_CR, read_buf); 536 ret = smsc95xx_write_reg(dev, COE_CR, read_buf);
552 if (ret < 0) { 537 if (ret < 0) {
553 devwarn(dev, "Failed to write COE_CR: %d", ret); 538 netdev_warn(dev->net, "Failed to write COE_CR: %d\n", ret);
554 return ret; 539 return ret;
555 } 540 }
556 541
557 if (netif_msg_hw(dev)) 542 netif_dbg(dev, hw, dev->net, "COE_CR = 0x%08x\n", read_buf);
558 devdbg(dev, "COE_CR = 0x%08x", read_buf);
559 return 0; 543 return 0;
560} 544}
561 545
@@ -580,8 +564,8 @@ static int smsc95xx_ethtool_set_eeprom(struct net_device *netdev,
580 struct usbnet *dev = netdev_priv(netdev); 564 struct usbnet *dev = netdev_priv(netdev);
581 565
582 if (ee->magic != LAN95XX_EEPROM_MAGIC) { 566 if (ee->magic != LAN95XX_EEPROM_MAGIC) {
583 devwarn(dev, "EEPROM: magic value mismatch, magic = 0x%x", 567 netdev_warn(dev->net, "EEPROM: magic value mismatch, magic = 0x%x\n",
584 ee->magic); 568 ee->magic);
585 return -EINVAL; 569 return -EINVAL;
586 } 570 }
587 571
@@ -659,16 +643,14 @@ static void smsc95xx_init_mac_address(struct usbnet *dev)
659 dev->net->dev_addr) == 0) { 643 dev->net->dev_addr) == 0) {
660 if (is_valid_ether_addr(dev->net->dev_addr)) { 644 if (is_valid_ether_addr(dev->net->dev_addr)) {
661 /* eeprom values are valid so use them */ 645 /* eeprom values are valid so use them */
662 if (netif_msg_ifup(dev)) 646 netif_dbg(dev, ifup, dev->net, "MAC address read from EEPROM\n");
663 devdbg(dev, "MAC address read from EEPROM");
664 return; 647 return;
665 } 648 }
666 } 649 }
667 650
668 /* no eeprom, or eeprom values are invalid. generate random MAC */ 651 /* no eeprom, or eeprom values are invalid. generate random MAC */
669 random_ether_addr(dev->net->dev_addr); 652 random_ether_addr(dev->net->dev_addr);
670 if (netif_msg_ifup(dev)) 653 netif_dbg(dev, ifup, dev->net, "MAC address set to random_ether_addr\n");
671 devdbg(dev, "MAC address set to random_ether_addr");
672} 654}
673 655
674static int smsc95xx_set_mac_address(struct usbnet *dev) 656static int smsc95xx_set_mac_address(struct usbnet *dev)
@@ -680,13 +662,13 @@ static int smsc95xx_set_mac_address(struct usbnet *dev)
680 662
681 ret = smsc95xx_write_reg(dev, ADDRL, addr_lo); 663 ret = smsc95xx_write_reg(dev, ADDRL, addr_lo);
682 if (ret < 0) { 664 if (ret < 0) {
683 devwarn(dev, "Failed to write ADDRL: %d", ret); 665 netdev_warn(dev->net, "Failed to write ADDRL: %d\n", ret);
684 return ret; 666 return ret;
685 } 667 }
686 668
687 ret = smsc95xx_write_reg(dev, ADDRH, addr_hi); 669 ret = smsc95xx_write_reg(dev, ADDRH, addr_hi);
688 if (ret < 0) { 670 if (ret < 0) {
689 devwarn(dev, "Failed to write ADDRH: %d", ret); 671 netdev_warn(dev->net, "Failed to write ADDRH: %d\n", ret);
690 return ret; 672 return ret;
691 } 673 }
692 674
@@ -747,8 +729,7 @@ static int smsc95xx_phy_initialize(struct usbnet *dev)
747 PHY_INT_MASK_DEFAULT_); 729 PHY_INT_MASK_DEFAULT_);
748 mii_nway_restart(&dev->mii); 730 mii_nway_restart(&dev->mii);
749 731
750 if (netif_msg_ifup(dev)) 732 netif_dbg(dev, ifup, dev->net, "phy initialised successfully\n");
751 devdbg(dev, "phy initialised successfully");
752 return 0; 733 return 0;
753} 734}
754 735
@@ -759,14 +740,13 @@ static int smsc95xx_reset(struct usbnet *dev)
759 u32 read_buf, write_buf, burst_cap; 740 u32 read_buf, write_buf, burst_cap;
760 int ret = 0, timeout; 741 int ret = 0, timeout;
761 742
762 if (netif_msg_ifup(dev)) 743 netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
763 devdbg(dev, "entering smsc95xx_reset");
764 744
765 write_buf = HW_CFG_LRST_; 745 write_buf = HW_CFG_LRST_;
766 ret = smsc95xx_write_reg(dev, HW_CFG, write_buf); 746 ret = smsc95xx_write_reg(dev, HW_CFG, write_buf);
767 if (ret < 0) { 747 if (ret < 0) {
768 devwarn(dev, "Failed to write HW_CFG_LRST_ bit in HW_CFG " 748 netdev_warn(dev->net, "Failed to write HW_CFG_LRST_ bit in HW_CFG register, ret = %d\n",
769 "register, ret = %d", ret); 749 ret);
770 return ret; 750 return ret;
771 } 751 }
772 752
@@ -774,7 +754,7 @@ static int smsc95xx_reset(struct usbnet *dev)
774 do { 754 do {
775 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); 755 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
776 if (ret < 0) { 756 if (ret < 0) {
777 devwarn(dev, "Failed to read HW_CFG: %d", ret); 757 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
778 return ret; 758 return ret;
779 } 759 }
780 msleep(10); 760 msleep(10);
@@ -782,14 +762,14 @@ static int smsc95xx_reset(struct usbnet *dev)
782 } while ((read_buf & HW_CFG_LRST_) && (timeout < 100)); 762 } while ((read_buf & HW_CFG_LRST_) && (timeout < 100));
783 763
784 if (timeout >= 100) { 764 if (timeout >= 100) {
785 devwarn(dev, "timeout waiting for completion of Lite Reset"); 765 netdev_warn(dev->net, "timeout waiting for completion of Lite Reset\n");
786 return ret; 766 return ret;
787 } 767 }
788 768
789 write_buf = PM_CTL_PHY_RST_; 769 write_buf = PM_CTL_PHY_RST_;
790 ret = smsc95xx_write_reg(dev, PM_CTRL, write_buf); 770 ret = smsc95xx_write_reg(dev, PM_CTRL, write_buf);
791 if (ret < 0) { 771 if (ret < 0) {
792 devwarn(dev, "Failed to write PM_CTRL: %d", ret); 772 netdev_warn(dev->net, "Failed to write PM_CTRL: %d\n", ret);
793 return ret; 773 return ret;
794 } 774 }
795 775
@@ -797,7 +777,7 @@ static int smsc95xx_reset(struct usbnet *dev)
797 do { 777 do {
798 ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf); 778 ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf);
799 if (ret < 0) { 779 if (ret < 0) {
800 devwarn(dev, "Failed to read PM_CTRL: %d", ret); 780 netdev_warn(dev->net, "Failed to read PM_CTRL: %d\n", ret);
801 return ret; 781 return ret;
802 } 782 }
803 msleep(10); 783 msleep(10);
@@ -805,7 +785,7 @@ static int smsc95xx_reset(struct usbnet *dev)
805 } while ((read_buf & PM_CTL_PHY_RST_) && (timeout < 100)); 785 } while ((read_buf & PM_CTL_PHY_RST_) && (timeout < 100));
806 786
807 if (timeout >= 100) { 787 if (timeout >= 100) {
808 devwarn(dev, "timeout waiting for PHY Reset"); 788 netdev_warn(dev->net, "timeout waiting for PHY Reset\n");
809 return ret; 789 return ret;
810 } 790 }
811 791
@@ -815,35 +795,35 @@ static int smsc95xx_reset(struct usbnet *dev)
815 if (ret < 0) 795 if (ret < 0)
816 return ret; 796 return ret;
817 797
818 if (netif_msg_ifup(dev)) 798 netif_dbg(dev, ifup, dev->net,
819 devdbg(dev, "MAC Address: %pM", dev->net->dev_addr); 799 "MAC Address: %pM\n", dev->net->dev_addr);
820 800
821 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); 801 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
822 if (ret < 0) { 802 if (ret < 0) {
823 devwarn(dev, "Failed to read HW_CFG: %d", ret); 803 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
824 return ret; 804 return ret;
825 } 805 }
826 806
827 if (netif_msg_ifup(dev)) 807 netif_dbg(dev, ifup, dev->net,
828 devdbg(dev, "Read Value from HW_CFG : 0x%08x", read_buf); 808 "Read Value from HW_CFG : 0x%08x\n", read_buf);
829 809
830 read_buf |= HW_CFG_BIR_; 810 read_buf |= HW_CFG_BIR_;
831 811
832 ret = smsc95xx_write_reg(dev, HW_CFG, read_buf); 812 ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
833 if (ret < 0) { 813 if (ret < 0) {
834 devwarn(dev, "Failed to write HW_CFG_BIR_ bit in HW_CFG " 814 netdev_warn(dev->net, "Failed to write HW_CFG_BIR_ bit in HW_CFG register, ret = %d\n",
835 "register, ret = %d", ret); 815 ret);
836 return ret; 816 return ret;
837 } 817 }
838 818
839 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); 819 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
840 if (ret < 0) { 820 if (ret < 0) {
841 devwarn(dev, "Failed to read HW_CFG: %d", ret); 821 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
842 return ret; 822 return ret;
843 } 823 }
844 if (netif_msg_ifup(dev)) 824 netif_dbg(dev, ifup, dev->net,
845 devdbg(dev, "Read Value from HW_CFG after writing " 825 "Read Value from HW_CFG after writing HW_CFG_BIR_: 0x%08x\n",
846 "HW_CFG_BIR_: 0x%08x", read_buf); 826 read_buf);
847 827
848 if (!turbo_mode) { 828 if (!turbo_mode) {
849 burst_cap = 0; 829 burst_cap = 0;
@@ -856,47 +836,47 @@ static int smsc95xx_reset(struct usbnet *dev)
856 dev->rx_urb_size = DEFAULT_FS_BURST_CAP_SIZE; 836 dev->rx_urb_size = DEFAULT_FS_BURST_CAP_SIZE;
857 } 837 }
858 838
859 if (netif_msg_ifup(dev)) 839 netif_dbg(dev, ifup, dev->net,
860 devdbg(dev, "rx_urb_size=%ld", (ulong)dev->rx_urb_size); 840 "rx_urb_size=%ld\n", (ulong)dev->rx_urb_size);
861 841
862 ret = smsc95xx_write_reg(dev, BURST_CAP, burst_cap); 842 ret = smsc95xx_write_reg(dev, BURST_CAP, burst_cap);
863 if (ret < 0) { 843 if (ret < 0) {
864 devwarn(dev, "Failed to write BURST_CAP: %d", ret); 844 netdev_warn(dev->net, "Failed to write BURST_CAP: %d\n", ret);
865 return ret; 845 return ret;
866 } 846 }
867 847
868 ret = smsc95xx_read_reg(dev, BURST_CAP, &read_buf); 848 ret = smsc95xx_read_reg(dev, BURST_CAP, &read_buf);
869 if (ret < 0) { 849 if (ret < 0) {
870 devwarn(dev, "Failed to read BURST_CAP: %d", ret); 850 netdev_warn(dev->net, "Failed to read BURST_CAP: %d\n", ret);
871 return ret; 851 return ret;
872 } 852 }
873 if (netif_msg_ifup(dev)) 853 netif_dbg(dev, ifup, dev->net,
874 devdbg(dev, "Read Value from BURST_CAP after writing: 0x%08x", 854 "Read Value from BURST_CAP after writing: 0x%08x\n",
875 read_buf); 855 read_buf);
876 856
877 read_buf = DEFAULT_BULK_IN_DELAY; 857 read_buf = DEFAULT_BULK_IN_DELAY;
878 ret = smsc95xx_write_reg(dev, BULK_IN_DLY, read_buf); 858 ret = smsc95xx_write_reg(dev, BULK_IN_DLY, read_buf);
879 if (ret < 0) { 859 if (ret < 0) {
880 devwarn(dev, "ret = %d", ret); 860 netdev_warn(dev->net, "ret = %d\n", ret);
881 return ret; 861 return ret;
882 } 862 }
883 863
884 ret = smsc95xx_read_reg(dev, BULK_IN_DLY, &read_buf); 864 ret = smsc95xx_read_reg(dev, BULK_IN_DLY, &read_buf);
885 if (ret < 0) { 865 if (ret < 0) {
886 devwarn(dev, "Failed to read BULK_IN_DLY: %d", ret); 866 netdev_warn(dev->net, "Failed to read BULK_IN_DLY: %d\n", ret);
887 return ret; 867 return ret;
888 } 868 }
889 if (netif_msg_ifup(dev)) 869 netif_dbg(dev, ifup, dev->net,
890 devdbg(dev, "Read Value from BULK_IN_DLY after writing: " 870 "Read Value from BULK_IN_DLY after writing: 0x%08x\n",
891 "0x%08x", read_buf); 871 read_buf);
892 872
893 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); 873 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
894 if (ret < 0) { 874 if (ret < 0) {
895 devwarn(dev, "Failed to read HW_CFG: %d", ret); 875 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
896 return ret; 876 return ret;
897 } 877 }
898 if (netif_msg_ifup(dev)) 878 netif_dbg(dev, ifup, dev->net,
899 devdbg(dev, "Read Value from HW_CFG: 0x%08x", read_buf); 879 "Read Value from HW_CFG: 0x%08x\n", read_buf);
900 880
901 if (turbo_mode) 881 if (turbo_mode)
902 read_buf |= (HW_CFG_MEF_ | HW_CFG_BCE_); 882 read_buf |= (HW_CFG_MEF_ | HW_CFG_BCE_);
@@ -908,41 +888,41 @@ static int smsc95xx_reset(struct usbnet *dev)
908 888
909 ret = smsc95xx_write_reg(dev, HW_CFG, read_buf); 889 ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
910 if (ret < 0) { 890 if (ret < 0) {
911 devwarn(dev, "Failed to write HW_CFG register, ret=%d", ret); 891 netdev_warn(dev->net, "Failed to write HW_CFG register, ret=%d\n",
892 ret);
912 return ret; 893 return ret;
913 } 894 }
914 895
915 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); 896 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
916 if (ret < 0) { 897 if (ret < 0) {
917 devwarn(dev, "Failed to read HW_CFG: %d", ret); 898 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
918 return ret; 899 return ret;
919 } 900 }
920 if (netif_msg_ifup(dev)) 901 netif_dbg(dev, ifup, dev->net,
921 devdbg(dev, "Read Value from HW_CFG after writing: 0x%08x", 902 "Read Value from HW_CFG after writing: 0x%08x\n", read_buf);
922 read_buf);
923 903
924 write_buf = 0xFFFFFFFF; 904 write_buf = 0xFFFFFFFF;
925 ret = smsc95xx_write_reg(dev, INT_STS, write_buf); 905 ret = smsc95xx_write_reg(dev, INT_STS, write_buf);
926 if (ret < 0) { 906 if (ret < 0) {
927 devwarn(dev, "Failed to write INT_STS register, ret=%d", ret); 907 netdev_warn(dev->net, "Failed to write INT_STS register, ret=%d\n",
908 ret);
928 return ret; 909 return ret;
929 } 910 }
930 911
931 ret = smsc95xx_read_reg(dev, ID_REV, &read_buf); 912 ret = smsc95xx_read_reg(dev, ID_REV, &read_buf);
932 if (ret < 0) { 913 if (ret < 0) {
933 devwarn(dev, "Failed to read ID_REV: %d", ret); 914 netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret);
934 return ret; 915 return ret;
935 } 916 }
936 if (netif_msg_ifup(dev)) 917 netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
937 devdbg(dev, "ID_REV = 0x%08x", read_buf);
938 918
939 /* Configure GPIO pins as LED outputs */ 919 /* Configure GPIO pins as LED outputs */
940 write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED | 920 write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
941 LED_GPIO_CFG_FDX_LED; 921 LED_GPIO_CFG_FDX_LED;
942 ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf); 922 ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
943 if (ret < 0) { 923 if (ret < 0) {
944 devwarn(dev, "Failed to write LED_GPIO_CFG register, ret=%d", 924 netdev_warn(dev->net, "Failed to write LED_GPIO_CFG register, ret=%d\n",
945 ret); 925 ret);
946 return ret; 926 return ret;
947 } 927 }
948 928
@@ -950,21 +930,21 @@ static int smsc95xx_reset(struct usbnet *dev)
950 write_buf = 0; 930 write_buf = 0;
951 ret = smsc95xx_write_reg(dev, FLOW, write_buf); 931 ret = smsc95xx_write_reg(dev, FLOW, write_buf);
952 if (ret < 0) { 932 if (ret < 0) {
953 devwarn(dev, "Failed to write FLOW: %d", ret); 933 netdev_warn(dev->net, "Failed to write FLOW: %d\n", ret);
954 return ret; 934 return ret;
955 } 935 }
956 936
957 read_buf = AFC_CFG_DEFAULT; 937 read_buf = AFC_CFG_DEFAULT;
958 ret = smsc95xx_write_reg(dev, AFC_CFG, read_buf); 938 ret = smsc95xx_write_reg(dev, AFC_CFG, read_buf);
959 if (ret < 0) { 939 if (ret < 0) {
960 devwarn(dev, "Failed to write AFC_CFG: %d", ret); 940 netdev_warn(dev->net, "Failed to write AFC_CFG: %d\n", ret);
961 return ret; 941 return ret;
962 } 942 }
963 943
964 /* Don't need mac_cr_lock during initialisation */ 944 /* Don't need mac_cr_lock during initialisation */
965 ret = smsc95xx_read_reg(dev, MAC_CR, &pdata->mac_cr); 945 ret = smsc95xx_read_reg(dev, MAC_CR, &pdata->mac_cr);
966 if (ret < 0) { 946 if (ret < 0) {
967 devwarn(dev, "Failed to read MAC_CR: %d", ret); 947 netdev_warn(dev->net, "Failed to read MAC_CR: %d\n", ret);
968 return ret; 948 return ret;
969 } 949 }
970 950
@@ -973,7 +953,7 @@ static int smsc95xx_reset(struct usbnet *dev)
973 write_buf = (u32)ETH_P_8021Q; 953 write_buf = (u32)ETH_P_8021Q;
974 ret = smsc95xx_write_reg(dev, VLAN1, write_buf); 954 ret = smsc95xx_write_reg(dev, VLAN1, write_buf);
975 if (ret < 0) { 955 if (ret < 0) {
976 devwarn(dev, "Failed to write VAN1: %d", ret); 956 netdev_warn(dev->net, "Failed to write VAN1: %d\n", ret);
977 return ret; 957 return ret;
978 } 958 }
979 959
@@ -981,7 +961,7 @@ static int smsc95xx_reset(struct usbnet *dev)
981 ethtool_op_set_tx_hw_csum(netdev, pdata->use_tx_csum); 961 ethtool_op_set_tx_hw_csum(netdev, pdata->use_tx_csum);
982 ret = smsc95xx_set_csums(dev); 962 ret = smsc95xx_set_csums(dev);
983 if (ret < 0) { 963 if (ret < 0) {
984 devwarn(dev, "Failed to set csum offload: %d", ret); 964 netdev_warn(dev->net, "Failed to set csum offload: %d\n", ret);
985 return ret; 965 return ret;
986 } 966 }
987 967
@@ -992,7 +972,7 @@ static int smsc95xx_reset(struct usbnet *dev)
992 972
993 ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf); 973 ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf);
994 if (ret < 0) { 974 if (ret < 0) {
995 devwarn(dev, "Failed to read INT_EP_CTL: %d", ret); 975 netdev_warn(dev->net, "Failed to read INT_EP_CTL: %d\n", ret);
996 return ret; 976 return ret;
997 } 977 }
998 978
@@ -1001,15 +981,14 @@ static int smsc95xx_reset(struct usbnet *dev)
1001 981
1002 ret = smsc95xx_write_reg(dev, INT_EP_CTL, read_buf); 982 ret = smsc95xx_write_reg(dev, INT_EP_CTL, read_buf);
1003 if (ret < 0) { 983 if (ret < 0) {
1004 devwarn(dev, "Failed to write INT_EP_CTL: %d", ret); 984 netdev_warn(dev->net, "Failed to write INT_EP_CTL: %d\n", ret);
1005 return ret; 985 return ret;
1006 } 986 }
1007 987
1008 smsc95xx_start_tx_path(dev); 988 smsc95xx_start_tx_path(dev);
1009 smsc95xx_start_rx_path(dev); 989 smsc95xx_start_rx_path(dev);
1010 990
1011 if (netif_msg_ifup(dev)) 991 netif_dbg(dev, ifup, dev->net, "smsc95xx_reset, return 0\n");
1012 devdbg(dev, "smsc95xx_reset, return 0");
1013 return 0; 992 return 0;
1014} 993}
1015 994
@@ -1034,7 +1013,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
1034 1013
1035 ret = usbnet_get_endpoints(dev, intf); 1014 ret = usbnet_get_endpoints(dev, intf);
1036 if (ret < 0) { 1015 if (ret < 0) {
1037 devwarn(dev, "usbnet_get_endpoints failed: %d", ret); 1016 netdev_warn(dev->net, "usbnet_get_endpoints failed: %d\n", ret);
1038 return ret; 1017 return ret;
1039 } 1018 }
1040 1019
@@ -1043,7 +1022,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
1043 1022
1044 pdata = (struct smsc95xx_priv *)(dev->data[0]); 1023 pdata = (struct smsc95xx_priv *)(dev->data[0]);
1045 if (!pdata) { 1024 if (!pdata) {
1046 devwarn(dev, "Unable to allocate struct smsc95xx_priv"); 1025 netdev_warn(dev->net, "Unable to allocate struct smsc95xx_priv\n");
1047 return -ENOMEM; 1026 return -ENOMEM;
1048 } 1027 }
1049 1028
@@ -1066,8 +1045,7 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
1066{ 1045{
1067 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 1046 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
1068 if (pdata) { 1047 if (pdata) {
1069 if (netif_msg_ifdown(dev)) 1048 netif_dbg(dev, ifdown, dev->net, "free pdata\n");
1070 devdbg(dev, "free pdata");
1071 kfree(pdata); 1049 kfree(pdata);
1072 pdata = NULL; 1050 pdata = NULL;
1073 dev->data[0] = 0; 1051 dev->data[0] = 0;
@@ -1101,8 +1079,8 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1101 align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4; 1079 align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4;
1102 1080
1103 if (unlikely(header & RX_STS_ES_)) { 1081 if (unlikely(header & RX_STS_ES_)) {
1104 if (netif_msg_rx_err(dev)) 1082 netif_dbg(dev, rx_err, dev->net,
1105 devdbg(dev, "Error header=0x%08x", header); 1083 "Error header=0x%08x\n", header);
1106 dev->net->stats.rx_errors++; 1084 dev->net->stats.rx_errors++;
1107 dev->net->stats.rx_dropped++; 1085 dev->net->stats.rx_dropped++;
1108 1086
@@ -1119,9 +1097,8 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1119 } else { 1097 } else {
1120 /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */ 1098 /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
1121 if (unlikely(size > (ETH_FRAME_LEN + 12))) { 1099 if (unlikely(size > (ETH_FRAME_LEN + 12))) {
1122 if (netif_msg_rx_err(dev)) 1100 netif_dbg(dev, rx_err, dev->net,
1123 devdbg(dev, "size err header=0x%08x", 1101 "size err header=0x%08x\n", header);
1124 header);
1125 return 0; 1102 return 0;
1126 } 1103 }
1127 1104
@@ -1137,7 +1114,7 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1137 1114
1138 ax_skb = skb_clone(skb, GFP_ATOMIC); 1115 ax_skb = skb_clone(skb, GFP_ATOMIC);
1139 if (unlikely(!ax_skb)) { 1116 if (unlikely(!ax_skb)) {
1140 devwarn(dev, "Error allocating skb"); 1117 netdev_warn(dev->net, "Error allocating skb\n");
1141 return 0; 1118 return 0;
1142 } 1119 }
1143 1120
@@ -1161,7 +1138,7 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1161 } 1138 }
1162 1139
1163 if (unlikely(skb->len < 0)) { 1140 if (unlikely(skb->len < 0)) {
1164 devwarn(dev, "invalid rx length<0 %d", skb->len); 1141 netdev_warn(dev->net, "invalid rx length<0 %d\n", skb->len);
1165 return 0; 1142 return 0;
1166 } 1143 }
1167 1144
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 035fab04c0a0..17b6a62d206e 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -242,13 +242,13 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
242 dev->net->stats.rx_packets++; 242 dev->net->stats.rx_packets++;
243 dev->net->stats.rx_bytes += skb->len; 243 dev->net->stats.rx_bytes += skb->len;
244 244
245 if (netif_msg_rx_status (dev)) 245 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
246 devdbg (dev, "< rx, len %zu, type 0x%x", 246 skb->len + sizeof (struct ethhdr), skb->protocol);
247 skb->len + sizeof (struct ethhdr), skb->protocol);
248 memset (skb->cb, 0, sizeof (struct skb_data)); 247 memset (skb->cb, 0, sizeof (struct skb_data));
249 status = netif_rx (skb); 248 status = netif_rx (skb);
250 if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev)) 249 if (status != NET_RX_SUCCESS)
251 devdbg (dev, "netif_rx status %d", status); 250 netif_dbg(dev, rx_err, dev->net,
251 "netif_rx status %d\n", status);
252} 252}
253EXPORT_SYMBOL_GPL(usbnet_skb_return); 253EXPORT_SYMBOL_GPL(usbnet_skb_return);
254 254
@@ -313,9 +313,9 @@ void usbnet_defer_kevent (struct usbnet *dev, int work)
313{ 313{
314 set_bit (work, &dev->flags); 314 set_bit (work, &dev->flags);
315 if (!schedule_work (&dev->kevent)) 315 if (!schedule_work (&dev->kevent))
316 deverr (dev, "kevent %d may have been dropped", work); 316 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
317 else 317 else
318 devdbg (dev, "kevent %d scheduled", work); 318 netdev_dbg(dev->net, "kevent %d scheduled\n", work);
319} 319}
320EXPORT_SYMBOL_GPL(usbnet_defer_kevent); 320EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
321 321
@@ -332,8 +332,7 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
332 size_t size = dev->rx_urb_size; 332 size_t size = dev->rx_urb_size;
333 333
334 if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) { 334 if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
335 if (netif_msg_rx_err (dev)) 335 netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
336 devdbg (dev, "no rx skb");
337 usbnet_defer_kevent (dev, EVENT_RX_MEMORY); 336 usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
338 usb_free_urb (urb); 337 usb_free_urb (urb);
339 return; 338 return;
@@ -363,21 +362,19 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
363 usbnet_defer_kevent (dev, EVENT_RX_MEMORY); 362 usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
364 break; 363 break;
365 case -ENODEV: 364 case -ENODEV:
366 if (netif_msg_ifdown (dev)) 365 netif_dbg(dev, ifdown, dev->net, "device gone\n");
367 devdbg (dev, "device gone");
368 netif_device_detach (dev->net); 366 netif_device_detach (dev->net);
369 break; 367 break;
370 default: 368 default:
371 if (netif_msg_rx_err (dev)) 369 netif_dbg(dev, rx_err, dev->net,
372 devdbg (dev, "rx submit, %d", retval); 370 "rx submit, %d\n", retval);
373 tasklet_schedule (&dev->bh); 371 tasklet_schedule (&dev->bh);
374 break; 372 break;
375 case 0: 373 case 0:
376 __skb_queue_tail (&dev->rxq, skb); 374 __skb_queue_tail (&dev->rxq, skb);
377 } 375 }
378 } else { 376 } else {
379 if (netif_msg_ifdown (dev)) 377 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
380 devdbg (dev, "rx: stopped");
381 retval = -ENOLINK; 378 retval = -ENOLINK;
382 } 379 }
383 spin_unlock_irqrestore (&dev->rxq.lock, lockflags); 380 spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
@@ -400,8 +397,7 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
400 if (skb->len) 397 if (skb->len)
401 usbnet_skb_return (dev, skb); 398 usbnet_skb_return (dev, skb);
402 else { 399 else {
403 if (netif_msg_rx_err (dev)) 400 netif_dbg(dev, rx_err, dev->net, "drop\n");
404 devdbg (dev, "drop");
405error: 401error:
406 dev->net->stats.rx_errors++; 402 dev->net->stats.rx_errors++;
407 skb_queue_tail (&dev->done, skb); 403 skb_queue_tail (&dev->done, skb);
@@ -428,8 +424,8 @@ static void rx_complete (struct urb *urb)
428 entry->state = rx_cleanup; 424 entry->state = rx_cleanup;
429 dev->net->stats.rx_errors++; 425 dev->net->stats.rx_errors++;
430 dev->net->stats.rx_length_errors++; 426 dev->net->stats.rx_length_errors++;
431 if (netif_msg_rx_err (dev)) 427 netif_dbg(dev, rx_err, dev->net,
432 devdbg (dev, "rx length %d", skb->len); 428 "rx length %d\n", skb->len);
433 } 429 }
434 break; 430 break;
435 431
@@ -446,8 +442,8 @@ static void rx_complete (struct urb *urb)
446 /* software-driven interface shutdown */ 442 /* software-driven interface shutdown */
447 case -ECONNRESET: /* async unlink */ 443 case -ECONNRESET: /* async unlink */
448 case -ESHUTDOWN: /* hardware gone */ 444 case -ESHUTDOWN: /* hardware gone */
449 if (netif_msg_ifdown (dev)) 445 netif_dbg(dev, ifdown, dev->net,
450 devdbg (dev, "rx shutdown, code %d", urb_status); 446 "rx shutdown, code %d\n", urb_status);
451 goto block; 447 goto block;
452 448
453 /* we get controller i/o faults during khubd disconnect() delays. 449 /* we get controller i/o faults during khubd disconnect() delays.
@@ -460,8 +456,8 @@ static void rx_complete (struct urb *urb)
460 dev->net->stats.rx_errors++; 456 dev->net->stats.rx_errors++;
461 if (!timer_pending (&dev->delay)) { 457 if (!timer_pending (&dev->delay)) {
462 mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES); 458 mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
463 if (netif_msg_link (dev)) 459 netif_dbg(dev, link, dev->net,
464 devdbg (dev, "rx throttle %d", urb_status); 460 "rx throttle %d\n", urb_status);
465 } 461 }
466block: 462block:
467 entry->state = rx_cleanup; 463 entry->state = rx_cleanup;
@@ -477,8 +473,7 @@ block:
477 default: 473 default:
478 entry->state = rx_cleanup; 474 entry->state = rx_cleanup;
479 dev->net->stats.rx_errors++; 475 dev->net->stats.rx_errors++;
480 if (netif_msg_rx_err (dev)) 476 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
481 devdbg (dev, "rx status %d", urb_status);
482 break; 477 break;
483 } 478 }
484 479
@@ -492,8 +487,7 @@ block:
492 } 487 }
493 usb_free_urb (urb); 488 usb_free_urb (urb);
494 } 489 }
495 if (netif_msg_rx_err (dev)) 490 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
496 devdbg (dev, "no read resubmitted");
497} 491}
498 492
499static void intr_complete (struct urb *urb) 493static void intr_complete (struct urb *urb)
@@ -510,15 +504,15 @@ static void intr_complete (struct urb *urb)
510 /* software-driven interface shutdown */ 504 /* software-driven interface shutdown */
511 case -ENOENT: /* urb killed */ 505 case -ENOENT: /* urb killed */
512 case -ESHUTDOWN: /* hardware gone */ 506 case -ESHUTDOWN: /* hardware gone */
513 if (netif_msg_ifdown (dev)) 507 netif_dbg(dev, ifdown, dev->net,
514 devdbg (dev, "intr shutdown, code %d", status); 508 "intr shutdown, code %d\n", status);
515 return; 509 return;
516 510
517 /* NOTE: not throttling like RX/TX, since this endpoint 511 /* NOTE: not throttling like RX/TX, since this endpoint
518 * already polls infrequently 512 * already polls infrequently
519 */ 513 */
520 default: 514 default:
521 devdbg (dev, "intr status %d", status); 515 netdev_dbg(dev->net, "intr status %d\n", status);
522 break; 516 break;
523 } 517 }
524 518
@@ -527,8 +521,9 @@ static void intr_complete (struct urb *urb)
527 521
528 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length); 522 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
529 status = usb_submit_urb (urb, GFP_ATOMIC); 523 status = usb_submit_urb (urb, GFP_ATOMIC);
530 if (status != 0 && netif_msg_timer (dev)) 524 if (status != 0)
531 deverr(dev, "intr resubmit --> %d", status); 525 netif_err(dev, timer, dev->net,
526 "intr resubmit --> %d\n", status);
532} 527}
533 528
534/*-------------------------------------------------------------------------*/ 529/*-------------------------------------------------------------------------*/
@@ -536,8 +531,7 @@ void usbnet_pause_rx(struct usbnet *dev)
536{ 531{
537 set_bit(EVENT_RX_PAUSED, &dev->flags); 532 set_bit(EVENT_RX_PAUSED, &dev->flags);
538 533
539 if (netif_msg_rx_status(dev)) 534 netif_dbg(dev, rx_status, dev->net, "paused rx queue enabled\n");
540 devdbg(dev, "paused rx queue enabled");
541} 535}
542EXPORT_SYMBOL_GPL(usbnet_pause_rx); 536EXPORT_SYMBOL_GPL(usbnet_pause_rx);
543 537
@@ -555,8 +549,8 @@ void usbnet_resume_rx(struct usbnet *dev)
555 549
556 tasklet_schedule(&dev->bh); 550 tasklet_schedule(&dev->bh);
557 551
558 if (netif_msg_rx_status(dev)) 552 netif_dbg(dev, rx_status, dev->net,
559 devdbg(dev, "paused rx queue disabled, %d skbs requeued", num); 553 "paused rx queue disabled, %d skbs requeued\n", num);
560} 554}
561EXPORT_SYMBOL_GPL(usbnet_resume_rx); 555EXPORT_SYMBOL_GPL(usbnet_resume_rx);
562 556
@@ -589,7 +583,7 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
589 // these (async) unlinks complete immediately 583 // these (async) unlinks complete immediately
590 retval = usb_unlink_urb (urb); 584 retval = usb_unlink_urb (urb);
591 if (retval != -EINPROGRESS && retval != 0) 585 if (retval != -EINPROGRESS && retval != 0)
592 devdbg (dev, "unlink urb err, %d", retval); 586 netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
593 else 587 else
594 count++; 588 count++;
595 } 589 }
@@ -631,9 +625,8 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
631 && !skb_queue_empty(&dev->done)) { 625 && !skb_queue_empty(&dev->done)) {
632 schedule_timeout(UNLINK_TIMEOUT_MS); 626 schedule_timeout(UNLINK_TIMEOUT_MS);
633 set_current_state(TASK_UNINTERRUPTIBLE); 627 set_current_state(TASK_UNINTERRUPTIBLE);
634 if (netif_msg_ifdown(dev)) 628 netif_dbg(dev, ifdown, dev->net,
635 devdbg(dev, "waited for %d urb completions", 629 "waited for %d urb completions\n", temp);
636 temp);
637 } 630 }
638 set_current_state(TASK_RUNNING); 631 set_current_state(TASK_RUNNING);
639 dev->wait = NULL; 632 dev->wait = NULL;
@@ -648,22 +641,21 @@ int usbnet_stop (struct net_device *net)
648 641
649 netif_stop_queue (net); 642 netif_stop_queue (net);
650 643
651 if (netif_msg_ifdown (dev)) 644 netif_info(dev, ifdown, dev->net,
652 devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld", 645 "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
653 net->stats.rx_packets, net->stats.tx_packets, 646 net->stats.rx_packets, net->stats.tx_packets,
654 net->stats.rx_errors, net->stats.tx_errors 647 net->stats.rx_errors, net->stats.tx_errors);
655 );
656 648
657 /* allow minidriver to stop correctly (wireless devices to turn off 649 /* allow minidriver to stop correctly (wireless devices to turn off
658 * radio etc) */ 650 * radio etc) */
659 if (info->stop) { 651 if (info->stop) {
660 retval = info->stop(dev); 652 retval = info->stop(dev);
661 if (retval < 0 && netif_msg_ifdown(dev)) 653 if (retval < 0)
662 devinfo(dev, 654 netif_info(dev, ifdown, dev->net,
663 "stop fail (%d) usbnet usb-%s-%s, %s", 655 "stop fail (%d) usbnet usb-%s-%s, %s\n",
664 retval, 656 retval,
665 dev->udev->bus->bus_name, dev->udev->devpath, 657 dev->udev->bus->bus_name, dev->udev->devpath,
666 info->description); 658 info->description);
667 } 659 }
668 660
669 if (!(info->flags & FLAG_AVOID_UNLINK_URBS)) 661 if (!(info->flags & FLAG_AVOID_UNLINK_URBS))
@@ -702,30 +694,29 @@ int usbnet_open (struct net_device *net)
702 struct driver_info *info = dev->driver_info; 694 struct driver_info *info = dev->driver_info;
703 695
704 if ((retval = usb_autopm_get_interface(dev->intf)) < 0) { 696 if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
705 if (netif_msg_ifup (dev)) 697 netif_info(dev, ifup, dev->net,
706 devinfo (dev, 698 "resumption fail (%d) usbnet usb-%s-%s, %s\n",
707 "resumption fail (%d) usbnet usb-%s-%s, %s", 699 retval,
708 retval, 700 dev->udev->bus->bus_name,
709 dev->udev->bus->bus_name, dev->udev->devpath, 701 dev->udev->devpath,
710 info->description); 702 info->description);
711 goto done_nopm; 703 goto done_nopm;
712 } 704 }
713 705
714 // put into "known safe" state 706 // put into "known safe" state
715 if (info->reset && (retval = info->reset (dev)) < 0) { 707 if (info->reset && (retval = info->reset (dev)) < 0) {
716 if (netif_msg_ifup (dev)) 708 netif_info(dev, ifup, dev->net,
717 devinfo (dev, 709 "open reset fail (%d) usbnet usb-%s-%s, %s\n",
718 "open reset fail (%d) usbnet usb-%s-%s, %s", 710 retval,
719 retval, 711 dev->udev->bus->bus_name,
720 dev->udev->bus->bus_name, dev->udev->devpath, 712 dev->udev->devpath,
721 info->description); 713 info->description);
722 goto done; 714 goto done;
723 } 715 }
724 716
725 // insist peer be connected 717 // insist peer be connected
726 if (info->check_connect && (retval = info->check_connect (dev)) < 0) { 718 if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
727 if (netif_msg_ifup (dev)) 719 netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval);
728 devdbg (dev, "can't open; %d", retval);
729 goto done; 720 goto done;
730 } 721 }
731 722
@@ -733,34 +724,23 @@ int usbnet_open (struct net_device *net)
733 if (dev->interrupt) { 724 if (dev->interrupt) {
734 retval = usb_submit_urb (dev->interrupt, GFP_KERNEL); 725 retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
735 if (retval < 0) { 726 if (retval < 0) {
736 if (netif_msg_ifup (dev)) 727 netif_err(dev, ifup, dev->net,
737 deverr (dev, "intr submit %d", retval); 728 "intr submit %d\n", retval);
738 goto done; 729 goto done;
739 } 730 }
740 } 731 }
741 732
742 netif_start_queue (net); 733 netif_start_queue (net);
743 if (netif_msg_ifup (dev)) { 734 netif_info(dev, ifup, dev->net,
744 char *framing; 735 "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
745 736 (int)RX_QLEN(dev), (int)TX_QLEN(dev),
746 if (dev->driver_info->flags & FLAG_FRAMING_NC) 737 dev->net->mtu,
747 framing = "NetChip"; 738 (dev->driver_info->flags & FLAG_FRAMING_NC) ? "NetChip" :
748 else if (dev->driver_info->flags & FLAG_FRAMING_GL) 739 (dev->driver_info->flags & FLAG_FRAMING_GL) ? "GeneSys" :
749 framing = "GeneSys"; 740 (dev->driver_info->flags & FLAG_FRAMING_Z) ? "Zaurus" :
750 else if (dev->driver_info->flags & FLAG_FRAMING_Z) 741 (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" :
751 framing = "Zaurus"; 742 (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
752 else if (dev->driver_info->flags & FLAG_FRAMING_RN) 743 "simple");
753 framing = "RNDIS";
754 else if (dev->driver_info->flags & FLAG_FRAMING_AX)
755 framing = "ASIX";
756 else
757 framing = "simple";
758
759 devinfo (dev, "open: enable queueing "
760 "(rx %d, tx %d) mtu %d %s framing",
761 (int)RX_QLEN (dev), (int)TX_QLEN (dev), dev->net->mtu,
762 framing);
763 }
764 744
765 // delay posting reads until we're fully open 745 // delay posting reads until we're fully open
766 tasklet_schedule (&dev->bh); 746 tasklet_schedule (&dev->bh);
@@ -771,6 +751,7 @@ int usbnet_open (struct net_device *net)
771 usb_autopm_put_interface(dev->intf); 751 usb_autopm_put_interface(dev->intf);
772 } 752 }
773 return retval; 753 return retval;
754
774done: 755done:
775 usb_autopm_put_interface(dev->intf); 756 usb_autopm_put_interface(dev->intf);
776done_nopm: 757done_nopm:
@@ -908,8 +889,8 @@ kevent (struct work_struct *work)
908 status != -ESHUTDOWN) { 889 status != -ESHUTDOWN) {
909 if (netif_msg_tx_err (dev)) 890 if (netif_msg_tx_err (dev))
910fail_pipe: 891fail_pipe:
911 deverr (dev, "can't clear tx halt, status %d", 892 netdev_err(dev->net, "can't clear tx halt, status %d\n",
912 status); 893 status);
913 } else { 894 } else {
914 clear_bit (EVENT_TX_HALT, &dev->flags); 895 clear_bit (EVENT_TX_HALT, &dev->flags);
915 if (status != -ESHUTDOWN) 896 if (status != -ESHUTDOWN)
@@ -928,8 +909,8 @@ fail_pipe:
928 status != -ESHUTDOWN) { 909 status != -ESHUTDOWN) {
929 if (netif_msg_rx_err (dev)) 910 if (netif_msg_rx_err (dev))
930fail_halt: 911fail_halt:
931 deverr (dev, "can't clear rx halt, status %d", 912 netdev_err(dev->net, "can't clear rx halt, status %d\n",
932 status); 913 status);
933 } else { 914 } else {
934 clear_bit (EVENT_RX_HALT, &dev->flags); 915 clear_bit (EVENT_RX_HALT, &dev->flags);
935 tasklet_schedule (&dev->bh); 916 tasklet_schedule (&dev->bh);
@@ -967,18 +948,18 @@ fail_lowmem:
967 if(info->link_reset && (retval = info->link_reset(dev)) < 0) { 948 if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
968 usb_autopm_put_interface(dev->intf); 949 usb_autopm_put_interface(dev->intf);
969skip_reset: 950skip_reset:
970 devinfo(dev, "link reset failed (%d) usbnet usb-%s-%s, %s", 951 netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n",
971 retval, 952 retval,
972 dev->udev->bus->bus_name, dev->udev->devpath, 953 dev->udev->bus->bus_name,
973 info->description); 954 dev->udev->devpath,
955 info->description);
974 } else { 956 } else {
975 usb_autopm_put_interface(dev->intf); 957 usb_autopm_put_interface(dev->intf);
976 } 958 }
977 } 959 }
978 960
979 if (dev->flags) 961 if (dev->flags)
980 devdbg (dev, "kevent done, flags = 0x%lx", 962 netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
981 dev->flags);
982} 963}
983 964
984/*-------------------------------------------------------------------------*/ 965/*-------------------------------------------------------------------------*/
@@ -1014,15 +995,14 @@ static void tx_complete (struct urb *urb)
1014 if (!timer_pending (&dev->delay)) { 995 if (!timer_pending (&dev->delay)) {
1015 mod_timer (&dev->delay, 996 mod_timer (&dev->delay,
1016 jiffies + THROTTLE_JIFFIES); 997 jiffies + THROTTLE_JIFFIES);
1017 if (netif_msg_link (dev)) 998 netif_dbg(dev, link, dev->net,
1018 devdbg (dev, "tx throttle %d", 999 "tx throttle %d\n", urb->status);
1019 urb->status);
1020 } 1000 }
1021 netif_stop_queue (dev->net); 1001 netif_stop_queue (dev->net);
1022 break; 1002 break;
1023 default: 1003 default:
1024 if (netif_msg_tx_err (dev)) 1004 netif_dbg(dev, tx_err, dev->net,
1025 devdbg (dev, "tx err %d", entry->urb->status); 1005 "tx err %d\n", entry->urb->status);
1026 break; 1006 break;
1027 } 1007 }
1028 } 1008 }
@@ -1064,16 +1044,14 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1064 if (info->tx_fixup) { 1044 if (info->tx_fixup) {
1065 skb = info->tx_fixup (dev, skb, GFP_ATOMIC); 1045 skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
1066 if (!skb) { 1046 if (!skb) {
1067 if (netif_msg_tx_err (dev)) 1047 netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
1068 devdbg (dev, "can't tx_fixup skb");
1069 goto drop; 1048 goto drop;
1070 } 1049 }
1071 } 1050 }
1072 length = skb->len; 1051 length = skb->len;
1073 1052
1074 if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) { 1053 if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
1075 if (netif_msg_tx_err (dev)) 1054 netif_dbg(dev, tx_err, dev->net, "no urb\n");
1076 devdbg (dev, "no urb");
1077 goto drop; 1055 goto drop;
1078 } 1056 }
1079 1057
@@ -1113,7 +1091,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1113 /* no use to process more packets */ 1091 /* no use to process more packets */
1114 netif_stop_queue(net); 1092 netif_stop_queue(net);
1115 spin_unlock_irqrestore(&dev->txq.lock, flags); 1093 spin_unlock_irqrestore(&dev->txq.lock, flags);
1116 devdbg(dev, "Delaying transmission for resumption"); 1094 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
1117 goto deferred; 1095 goto deferred;
1118 } 1096 }
1119#endif 1097#endif
@@ -1126,8 +1104,8 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1126 break; 1104 break;
1127 default: 1105 default:
1128 usb_autopm_put_interface_async(dev->intf); 1106 usb_autopm_put_interface_async(dev->intf);
1129 if (netif_msg_tx_err (dev)) 1107 netif_dbg(dev, tx_err, dev->net,
1130 devdbg (dev, "tx: submit urb err %d", retval); 1108 "tx: submit urb err %d\n", retval);
1131 break; 1109 break;
1132 case 0: 1110 case 0:
1133 net->trans_start = jiffies; 1111 net->trans_start = jiffies;
@@ -1138,17 +1116,15 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1138 spin_unlock_irqrestore (&dev->txq.lock, flags); 1116 spin_unlock_irqrestore (&dev->txq.lock, flags);
1139 1117
1140 if (retval) { 1118 if (retval) {
1141 if (netif_msg_tx_err (dev)) 1119 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
1142 devdbg (dev, "drop, code %d", retval);
1143drop: 1120drop:
1144 dev->net->stats.tx_dropped++; 1121 dev->net->stats.tx_dropped++;
1145 if (skb) 1122 if (skb)
1146 dev_kfree_skb_any (skb); 1123 dev_kfree_skb_any (skb);
1147 usb_free_urb (urb); 1124 usb_free_urb (urb);
1148 } else if (netif_msg_tx_queued (dev)) { 1125 } else
1149 devdbg (dev, "> tx, len %d, type 0x%x", 1126 netif_dbg(dev, tx_queued, dev->net,
1150 length, skb->protocol); 1127 "> tx, len %d, type 0x%x\n", length, skb->protocol);
1151 }
1152#ifdef CONFIG_PM 1128#ifdef CONFIG_PM
1153deferred: 1129deferred:
1154#endif 1130#endif
@@ -1179,7 +1155,7 @@ static void usbnet_bh (unsigned long param)
1179 dev_kfree_skb (skb); 1155 dev_kfree_skb (skb);
1180 continue; 1156 continue;
1181 default: 1157 default:
1182 devdbg (dev, "bogus skb state %d", entry->state); 1158 netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
1183 } 1159 }
1184 } 1160 }
1185 1161
@@ -1207,9 +1183,10 @@ static void usbnet_bh (unsigned long param)
1207 if (urb != NULL) 1183 if (urb != NULL)
1208 rx_submit (dev, urb, GFP_ATOMIC); 1184 rx_submit (dev, urb, GFP_ATOMIC);
1209 } 1185 }
1210 if (temp != dev->rxq.qlen && netif_msg_link (dev)) 1186 if (temp != dev->rxq.qlen)
1211 devdbg (dev, "rxqlen %d --> %d", 1187 netif_dbg(dev, link, dev->net,
1212 temp, dev->rxq.qlen); 1188 "rxqlen %d --> %d\n",
1189 temp, dev->rxq.qlen);
1213 if (dev->rxq.qlen < qlen) 1190 if (dev->rxq.qlen < qlen)
1214 tasklet_schedule (&dev->bh); 1191 tasklet_schedule (&dev->bh);
1215 } 1192 }
@@ -1240,11 +1217,10 @@ void usbnet_disconnect (struct usb_interface *intf)
1240 1217
1241 xdev = interface_to_usbdev (intf); 1218 xdev = interface_to_usbdev (intf);
1242 1219
1243 if (netif_msg_probe (dev)) 1220 netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n",
1244 devinfo (dev, "unregister '%s' usb-%s-%s, %s", 1221 intf->dev.driver->name,
1245 intf->dev.driver->name, 1222 xdev->bus->bus_name, xdev->devpath,
1246 xdev->bus->bus_name, xdev->devpath, 1223 dev->driver_info->description);
1247 dev->driver_info->description);
1248 1224
1249 net = dev->net; 1225 net = dev->net;
1250 unregister_netdev (net); 1226 unregister_netdev (net);
@@ -1407,12 +1383,12 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1407 status = register_netdev (net); 1383 status = register_netdev (net);
1408 if (status) 1384 if (status)
1409 goto out3; 1385 goto out3;
1410 if (netif_msg_probe (dev)) 1386 netif_info(dev, probe, dev->net,
1411 devinfo (dev, "register '%s' at usb-%s-%s, %s, %pM", 1387 "register '%s' at usb-%s-%s, %s, %pM\n",
1412 udev->dev.driver->name, 1388 udev->dev.driver->name,
1413 xdev->bus->bus_name, xdev->devpath, 1389 xdev->bus->bus_name, xdev->devpath,
1414 dev->driver_info->description, 1390 dev->driver_info->description,
1415 net->dev_addr); 1391 net->dev_addr);
1416 1392
1417 // ok, it's ready to go. 1393 // ok, it's ready to go.
1418 usb_set_intfdata (udev, dev); 1394 usb_set_intfdata (udev, dev);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 3a15de56df9c..b583d4968add 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -34,7 +34,7 @@ struct veth_net_stats {
34 34
35struct veth_priv { 35struct veth_priv {
36 struct net_device *peer; 36 struct net_device *peer;
37 struct veth_net_stats *stats; 37 struct veth_net_stats __percpu *stats;
38 unsigned ip_summed; 38 unsigned ip_summed;
39}; 39};
40 40
@@ -263,7 +263,7 @@ static int veth_change_mtu(struct net_device *dev, int new_mtu)
263 263
264static int veth_dev_init(struct net_device *dev) 264static int veth_dev_init(struct net_device *dev)
265{ 265{
266 struct veth_net_stats *stats; 266 struct veth_net_stats __percpu *stats;
267 struct veth_priv *priv; 267 struct veth_priv *priv;
268 268
269 stats = alloc_percpu(struct veth_net_stats); 269 stats = alloc_percpu(struct veth_net_stats);
@@ -333,19 +333,17 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
333 struct veth_priv *priv; 333 struct veth_priv *priv;
334 char ifname[IFNAMSIZ]; 334 char ifname[IFNAMSIZ];
335 struct nlattr *peer_tb[IFLA_MAX + 1], **tbp; 335 struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
336 struct ifinfomsg *ifmp;
336 struct net *net; 337 struct net *net;
337 338
338 /* 339 /*
339 * create and register peer first 340 * create and register peer first
340 *
341 * struct ifinfomsg is at the head of VETH_INFO_PEER, but we
342 * skip it since no info from it is useful yet
343 */ 341 */
344
345 if (data != NULL && data[VETH_INFO_PEER] != NULL) { 342 if (data != NULL && data[VETH_INFO_PEER] != NULL) {
346 struct nlattr *nla_peer; 343 struct nlattr *nla_peer;
347 344
348 nla_peer = data[VETH_INFO_PEER]; 345 nla_peer = data[VETH_INFO_PEER];
346 ifmp = nla_data(nla_peer);
349 err = nla_parse(peer_tb, IFLA_MAX, 347 err = nla_parse(peer_tb, IFLA_MAX,
350 nla_data(nla_peer) + sizeof(struct ifinfomsg), 348 nla_data(nla_peer) + sizeof(struct ifinfomsg),
351 nla_len(nla_peer) - sizeof(struct ifinfomsg), 349 nla_len(nla_peer) - sizeof(struct ifinfomsg),
@@ -358,8 +356,10 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
358 return err; 356 return err;
359 357
360 tbp = peer_tb; 358 tbp = peer_tb;
361 } else 359 } else {
360 ifmp = NULL;
362 tbp = tb; 361 tbp = tb;
362 }
363 363
364 if (tbp[IFLA_IFNAME]) 364 if (tbp[IFLA_IFNAME])
365 nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); 365 nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
@@ -387,6 +387,10 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
387 387
388 netif_carrier_off(peer); 388 netif_carrier_off(peer);
389 389
390 err = rtnl_configure_link(peer, ifmp);
391 if (err < 0)
392 goto err_configure_peer;
393
390 /* 394 /*
391 * register dev last 395 * register dev last
392 * 396 *
@@ -428,6 +432,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
428err_register_dev: 432err_register_dev:
429 /* nothing to do */ 433 /* nothing to do */
430err_alloc_name: 434err_alloc_name:
435err_configure_peer:
431 unregister_netdevice(peer); 436 unregister_netdevice(peer);
432 return err; 437 return err;
433 438
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 611b80435955..50f881aa3939 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -267,7 +267,7 @@ enum rhine_quirks {
267/* Beware of PCI posted writes */ 267/* Beware of PCI posted writes */
268#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0) 268#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
269 269
270static const struct pci_device_id rhine_pci_tbl[] = { 270static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
271 { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */ 271 { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
272 { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */ 272 { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
273 { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */ 273 { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
@@ -1697,7 +1697,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
1697 rx_mode = 0x1C; 1697 rx_mode = 0x1C;
1698 iowrite32(0xffffffff, ioaddr + MulticastFilter0); 1698 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1699 iowrite32(0xffffffff, ioaddr + MulticastFilter1); 1699 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1700 } else if ((dev->mc_count > multicast_filter_limit) || 1700 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1701 (dev->flags & IFF_ALLMULTI)) { 1701 (dev->flags & IFF_ALLMULTI)) {
1702 /* Too many to match, or accept all multicasts. */ 1702 /* Too many to match, or accept all multicasts. */
1703 iowrite32(0xffffffff, ioaddr + MulticastFilter0); 1703 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
@@ -1705,10 +1705,9 @@ static void rhine_set_rx_mode(struct net_device *dev)
1705 rx_mode = 0x0C; 1705 rx_mode = 0x0C;
1706 } else { 1706 } else {
1707 struct dev_mc_list *mclist; 1707 struct dev_mc_list *mclist;
1708 int i; 1708
1709 memset(mc_filter, 0, sizeof(mc_filter)); 1709 memset(mc_filter, 0, sizeof(mc_filter));
1710 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 1710 netdev_for_each_mc_addr(mclist, dev) {
1711 i++, mclist = mclist->next) {
1712 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 1711 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1713 1712
1714 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 1713 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 317aa34b21cf..3a486f3bad3d 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -361,7 +361,7 @@ static struct velocity_info_tbl chip_info_table[] = {
361 * Describe the PCI device identifiers that we support in this 361 * Describe the PCI device identifiers that we support in this
362 * device driver. Used for hotplug autoloading. 362 * device driver. Used for hotplug autoloading.
363 */ 363 */
364static const struct pci_device_id velocity_id_table[] __devinitdata = { 364static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = {
365 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) }, 365 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
366 { } 366 { }
367}; 367};
@@ -1132,7 +1132,7 @@ static void velocity_set_multi(struct net_device *dev)
1132 writel(0xffffffff, &regs->MARCAM[0]); 1132 writel(0xffffffff, &regs->MARCAM[0]);
1133 writel(0xffffffff, &regs->MARCAM[4]); 1133 writel(0xffffffff, &regs->MARCAM[4]);
1134 rx_mode = (RCR_AM | RCR_AB | RCR_PROM); 1134 rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
1135 } else if ((dev->mc_count > vptr->multicast_limit) || 1135 } else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
1136 (dev->flags & IFF_ALLMULTI)) { 1136 (dev->flags & IFF_ALLMULTI)) {
1137 writel(0xffffffff, &regs->MARCAM[0]); 1137 writel(0xffffffff, &regs->MARCAM[0]);
1138 writel(0xffffffff, &regs->MARCAM[4]); 1138 writel(0xffffffff, &regs->MARCAM[4]);
@@ -1141,9 +1141,11 @@ static void velocity_set_multi(struct net_device *dev)
1141 int offset = MCAM_SIZE - vptr->multicast_limit; 1141 int offset = MCAM_SIZE - vptr->multicast_limit;
1142 mac_get_cam_mask(regs, vptr->mCAMmask); 1142 mac_get_cam_mask(regs, vptr->mCAMmask);
1143 1143
1144 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) { 1144 i = 0;
1145 netdev_for_each_mc_addr(mclist, dev) {
1145 mac_set_cam(regs, i + offset, mclist->dmi_addr); 1146 mac_set_cam(regs, i + offset, mclist->dmi_addr);
1146 vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7); 1147 vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
1148 i++;
1147 } 1149 }
1148 1150
1149 mac_set_cam_mask(regs, vptr->mCAMmask); 1151 mac_set_cam_mask(regs, vptr->mCAMmask);
@@ -2698,10 +2700,8 @@ static void __devinit velocity_print_info(struct velocity_info *vptr)
2698 struct net_device *dev = vptr->dev; 2700 struct net_device *dev = vptr->dev;
2699 2701
2700 printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id)); 2702 printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
2701 printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", 2703 printk(KERN_INFO "%s: Ethernet Address: %pM\n",
2702 dev->name, 2704 dev->name, dev->dev_addr);
2703 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
2704 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
2705} 2705}
2706 2706
2707static u32 velocity_get_link(struct net_device *dev) 2707static u32 velocity_get_link(struct net_device *dev)
@@ -2825,7 +2825,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
2825 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); 2825 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
2826 2826
2827 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | 2827 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2828 NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM; 2828 NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM | NETIF_F_SG;
2829 2829
2830 ret = register_netdev(dev); 2830 ret = register_netdev(dev);
2831 if (ret < 0) 2831 if (ret < 0)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9ead30bd00c4..25dc77ccbf58 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -56,10 +56,6 @@ struct virtnet_info
56 /* Host will merge rx buffers for big packets (shake it! shake it!) */ 56 /* Host will merge rx buffers for big packets (shake it! shake it!) */
57 bool mergeable_rx_bufs; 57 bool mergeable_rx_bufs;
58 58
59 /* Receive & send queues. */
60 struct sk_buff_head recv;
61 struct sk_buff_head send;
62
63 /* Work struct for refilling if we run low on memory. */ 59 /* Work struct for refilling if we run low on memory. */
64 struct delayed_work refill; 60 struct delayed_work refill;
65 61
@@ -75,34 +71,44 @@ struct skb_vnet_hdr {
75 unsigned int num_sg; 71 unsigned int num_sg;
76}; 72};
77 73
74struct padded_vnet_hdr {
75 struct virtio_net_hdr hdr;
76 /*
77 * virtio_net_hdr should be in a separated sg buffer because of a
78 * QEMU bug, and data sg buffer shares same page with this header sg.
79 * This padding makes next sg 16 byte aligned after virtio_net_hdr.
80 */
81 char padding[6];
82};
83
78static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb) 84static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
79{ 85{
80 return (struct skb_vnet_hdr *)skb->cb; 86 return (struct skb_vnet_hdr *)skb->cb;
81} 87}
82 88
83static void give_a_page(struct virtnet_info *vi, struct page *page) 89/*
84{ 90 * private is used to chain pages for big packets, put the whole
85 page->private = (unsigned long)vi->pages; 91 * most recent used list in the beginning for reuse
86 vi->pages = page; 92 */
87} 93static void give_pages(struct virtnet_info *vi, struct page *page)
88
89static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
90{ 94{
91 unsigned int i; 95 struct page *end;
92 96
93 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 97 /* Find end of list, sew whole thing into vi->pages. */
94 give_a_page(vi, skb_shinfo(skb)->frags[i].page); 98 for (end = page; end->private; end = (struct page *)end->private);
95 skb_shinfo(skb)->nr_frags = 0; 99 end->private = (unsigned long)vi->pages;
96 skb->data_len = 0; 100 vi->pages = page;
97} 101}
98 102
99static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask) 103static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
100{ 104{
101 struct page *p = vi->pages; 105 struct page *p = vi->pages;
102 106
103 if (p) 107 if (p) {
104 vi->pages = (struct page *)p->private; 108 vi->pages = (struct page *)p->private;
105 else 109 /* clear private here, it is used to chain pages */
110 p->private = 0;
111 } else
106 p = alloc_page(gfp_mask); 112 p = alloc_page(gfp_mask);
107 return p; 113 return p;
108} 114}
@@ -118,99 +124,142 @@ static void skb_xmit_done(struct virtqueue *svq)
118 netif_wake_queue(vi->dev); 124 netif_wake_queue(vi->dev);
119} 125}
120 126
121static void receive_skb(struct net_device *dev, struct sk_buff *skb, 127static void set_skb_frag(struct sk_buff *skb, struct page *page,
122 unsigned len) 128 unsigned int offset, unsigned int *len)
123{ 129{
124 struct virtnet_info *vi = netdev_priv(dev); 130 int i = skb_shinfo(skb)->nr_frags;
125 struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); 131 skb_frag_t *f;
126 int err; 132
127 int i; 133 f = &skb_shinfo(skb)->frags[i];
134 f->size = min((unsigned)PAGE_SIZE - offset, *len);
135 f->page_offset = offset;
136 f->page = page;
137
138 skb->data_len += f->size;
139 skb->len += f->size;
140 skb_shinfo(skb)->nr_frags++;
141 *len -= f->size;
142}
128 143
129 if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { 144static struct sk_buff *page_to_skb(struct virtnet_info *vi,
130 pr_debug("%s: short packet %i\n", dev->name, len); 145 struct page *page, unsigned int len)
131 dev->stats.rx_length_errors++; 146{
132 goto drop; 147 struct sk_buff *skb;
133 } 148 struct skb_vnet_hdr *hdr;
149 unsigned int copy, hdr_len, offset;
150 char *p;
134 151
135 if (vi->mergeable_rx_bufs) { 152 p = page_address(page);
136 unsigned int copy;
137 char *p = page_address(skb_shinfo(skb)->frags[0].page);
138 153
139 if (len > PAGE_SIZE) 154 /* copy small packet so we can reuse these pages for small data */
140 len = PAGE_SIZE; 155 skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
141 len -= sizeof(struct virtio_net_hdr_mrg_rxbuf); 156 if (unlikely(!skb))
142 157 return NULL;
143 memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr));
144 p += sizeof(hdr->mhdr);
145 158
146 copy = len; 159 hdr = skb_vnet_hdr(skb);
147 if (copy > skb_tailroom(skb))
148 copy = skb_tailroom(skb);
149 160
150 memcpy(skb_put(skb, copy), p, copy); 161 if (vi->mergeable_rx_bufs) {
162 hdr_len = sizeof hdr->mhdr;
163 offset = hdr_len;
164 } else {
165 hdr_len = sizeof hdr->hdr;
166 offset = sizeof(struct padded_vnet_hdr);
167 }
151 168
152 len -= copy; 169 memcpy(hdr, p, hdr_len);
153 170
154 if (!len) { 171 len -= hdr_len;
155 give_a_page(vi, skb_shinfo(skb)->frags[0].page); 172 p += offset;
156 skb_shinfo(skb)->nr_frags--;
157 } else {
158 skb_shinfo(skb)->frags[0].page_offset +=
159 sizeof(hdr->mhdr) + copy;
160 skb_shinfo(skb)->frags[0].size = len;
161 skb->data_len += len;
162 skb->len += len;
163 }
164 173
165 while (--hdr->mhdr.num_buffers) { 174 copy = len;
166 struct sk_buff *nskb; 175 if (copy > skb_tailroom(skb))
176 copy = skb_tailroom(skb);
177 memcpy(skb_put(skb, copy), p, copy);
167 178
168 i = skb_shinfo(skb)->nr_frags; 179 len -= copy;
169 if (i >= MAX_SKB_FRAGS) { 180 offset += copy;
170 pr_debug("%s: packet too long %d\n", dev->name,
171 len);
172 dev->stats.rx_length_errors++;
173 goto drop;
174 }
175 181
176 nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len); 182 while (len) {
177 if (!nskb) { 183 set_skb_frag(skb, page, offset, &len);
178 pr_debug("%s: rx error: %d buffers missing\n", 184 page = (struct page *)page->private;
179 dev->name, hdr->mhdr.num_buffers); 185 offset = 0;
180 dev->stats.rx_length_errors++; 186 }
181 goto drop;
182 }
183 187
184 __skb_unlink(nskb, &vi->recv); 188 if (page)
185 vi->num--; 189 give_pages(vi, page);
186 190
187 skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0]; 191 return skb;
188 skb_shinfo(nskb)->nr_frags = 0; 192}
189 kfree_skb(nskb);
190 193
191 if (len > PAGE_SIZE) 194static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
192 len = PAGE_SIZE; 195{
196 struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
197 struct page *page;
198 int num_buf, i, len;
199
200 num_buf = hdr->mhdr.num_buffers;
201 while (--num_buf) {
202 i = skb_shinfo(skb)->nr_frags;
203 if (i >= MAX_SKB_FRAGS) {
204 pr_debug("%s: packet too long\n", skb->dev->name);
205 skb->dev->stats.rx_length_errors++;
206 return -EINVAL;
207 }
193 208
194 skb_shinfo(skb)->frags[i].size = len; 209 page = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
195 skb_shinfo(skb)->nr_frags++; 210 if (!page) {
196 skb->data_len += len; 211 pr_debug("%s: rx error: %d buffers missing\n",
197 skb->len += len; 212 skb->dev->name, hdr->mhdr.num_buffers);
213 skb->dev->stats.rx_length_errors++;
214 return -EINVAL;
198 } 215 }
199 } else { 216 if (len > PAGE_SIZE)
200 len -= sizeof(hdr->hdr); 217 len = PAGE_SIZE;
218
219 set_skb_frag(skb, page, 0, &len);
220
221 --vi->num;
222 }
223 return 0;
224}
225
226static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
227{
228 struct virtnet_info *vi = netdev_priv(dev);
229 struct sk_buff *skb;
230 struct page *page;
231 struct skb_vnet_hdr *hdr;
201 232
202 if (len <= MAX_PACKET_LEN) 233 if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
203 trim_pages(vi, skb); 234 pr_debug("%s: short packet %i\n", dev->name, len);
235 dev->stats.rx_length_errors++;
236 if (vi->mergeable_rx_bufs || vi->big_packets)
237 give_pages(vi, buf);
238 else
239 dev_kfree_skb(buf);
240 return;
241 }
204 242
205 err = pskb_trim(skb, len); 243 if (!vi->mergeable_rx_bufs && !vi->big_packets) {
206 if (err) { 244 skb = buf;
207 pr_debug("%s: pskb_trim failed %i %d\n", dev->name, 245 len -= sizeof(struct virtio_net_hdr);
208 len, err); 246 skb_trim(skb, len);
247 } else {
248 page = buf;
249 skb = page_to_skb(vi, page, len);
250 if (unlikely(!skb)) {
209 dev->stats.rx_dropped++; 251 dev->stats.rx_dropped++;
210 goto drop; 252 give_pages(vi, page);
253 return;
211 } 254 }
255 if (vi->mergeable_rx_bufs)
256 if (receive_mergeable(vi, skb)) {
257 dev_kfree_skb(skb);
258 return;
259 }
212 } 260 }
213 261
262 hdr = skb_vnet_hdr(skb);
214 skb->truesize += skb->data_len; 263 skb->truesize += skb->data_len;
215 dev->stats.rx_bytes += skb->len; 264 dev->stats.rx_bytes += skb->len;
216 dev->stats.rx_packets++; 265 dev->stats.rx_packets++;
@@ -267,110 +316,119 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
267 316
268frame_err: 317frame_err:
269 dev->stats.rx_frame_errors++; 318 dev->stats.rx_frame_errors++;
270drop:
271 dev_kfree_skb(skb); 319 dev_kfree_skb(skb);
272} 320}
273 321
274static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp) 322static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
275{ 323{
276 struct sk_buff *skb; 324 struct sk_buff *skb;
277 struct scatterlist sg[2+MAX_SKB_FRAGS]; 325 struct skb_vnet_hdr *hdr;
278 int num, err, i; 326 struct scatterlist sg[2];
279 bool oom = false; 327 int err;
280
281 sg_init_table(sg, 2+MAX_SKB_FRAGS);
282 do {
283 struct skb_vnet_hdr *hdr;
284 328
285 skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); 329 skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
286 if (unlikely(!skb)) { 330 if (unlikely(!skb))
287 oom = true; 331 return -ENOMEM;
288 break;
289 }
290 332
291 skb_put(skb, MAX_PACKET_LEN); 333 skb_put(skb, MAX_PACKET_LEN);
292 334
293 hdr = skb_vnet_hdr(skb); 335 hdr = skb_vnet_hdr(skb);
294 sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr)); 336 sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
295 337
296 if (vi->big_packets) { 338 skb_to_sgvec(skb, sg + 1, 0, skb->len);
297 for (i = 0; i < MAX_SKB_FRAGS; i++) {
298 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
299 f->page = get_a_page(vi, gfp);
300 if (!f->page)
301 break;
302 339
303 f->page_offset = 0; 340 err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb);
304 f->size = PAGE_SIZE; 341 if (err < 0)
342 dev_kfree_skb(skb);
305 343
306 skb->data_len += PAGE_SIZE; 344 return err;
307 skb->len += PAGE_SIZE; 345}
308 346
309 skb_shinfo(skb)->nr_frags++; 347static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
310 } 348{
349 struct scatterlist sg[MAX_SKB_FRAGS + 2];
350 struct page *first, *list = NULL;
351 char *p;
352 int i, err, offset;
353
354 /* page in sg[MAX_SKB_FRAGS + 1] is list tail */
355 for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
356 first = get_a_page(vi, gfp);
357 if (!first) {
358 if (list)
359 give_pages(vi, list);
360 return -ENOMEM;
311 } 361 }
362 sg_set_buf(&sg[i], page_address(first), PAGE_SIZE);
312 363
313 num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; 364 /* chain new page in list head to match sg */
314 skb_queue_head(&vi->recv, skb); 365 first->private = (unsigned long)list;
366 list = first;
367 }
315 368
316 err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb); 369 first = get_a_page(vi, gfp);
317 if (err < 0) { 370 if (!first) {
318 skb_unlink(skb, &vi->recv); 371 give_pages(vi, list);
319 trim_pages(vi, skb); 372 return -ENOMEM;
320 kfree_skb(skb); 373 }
321 break; 374 p = page_address(first);
322 } 375
323 vi->num++; 376 /* sg[0], sg[1] share the same page */
324 } while (err >= num); 377 /* a separated sg[0] for virtio_net_hdr only during to QEMU bug*/
325 if (unlikely(vi->num > vi->max)) 378 sg_set_buf(&sg[0], p, sizeof(struct virtio_net_hdr));
326 vi->max = vi->num; 379
327 vi->rvq->vq_ops->kick(vi->rvq); 380 /* sg[1] for data packet, from offset */
328 return !oom; 381 offset = sizeof(struct padded_vnet_hdr);
382 sg_set_buf(&sg[1], p + offset, PAGE_SIZE - offset);
383
384 /* chain first in list head */
385 first->private = (unsigned long)list;
386 err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2,
387 first);
388 if (err < 0)
389 give_pages(vi, first);
390
391 return err;
329} 392}
330 393
331/* Returns false if we couldn't fill entirely (OOM). */ 394static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
332static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
333{ 395{
334 struct sk_buff *skb; 396 struct page *page;
335 struct scatterlist sg[1]; 397 struct scatterlist sg;
336 int err; 398 int err;
337 bool oom = false;
338 399
339 if (!vi->mergeable_rx_bufs) 400 page = get_a_page(vi, gfp);
340 return try_fill_recv_maxbufs(vi, gfp); 401 if (!page)
402 return -ENOMEM;
341 403
342 do { 404 sg_init_one(&sg, page_address(page), PAGE_SIZE);
343 skb_frag_t *f;
344 405
345 skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN); 406 err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page);
346 if (unlikely(!skb)) { 407 if (err < 0)
347 oom = true; 408 give_pages(vi, page);
348 break;
349 }
350 409
351 f = &skb_shinfo(skb)->frags[0]; 410 return err;
352 f->page = get_a_page(vi, gfp); 411}
353 if (!f->page) {
354 oom = true;
355 kfree_skb(skb);
356 break;
357 }
358
359 f->page_offset = 0;
360 f->size = PAGE_SIZE;
361 412
362 skb_shinfo(skb)->nr_frags++; 413/* Returns false if we couldn't fill entirely (OOM). */
414static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
415{
416 int err;
417 bool oom = false;
363 418
364 sg_init_one(sg, page_address(f->page), PAGE_SIZE); 419 do {
365 skb_queue_head(&vi->recv, skb); 420 if (vi->mergeable_rx_bufs)
421 err = add_recvbuf_mergeable(vi, gfp);
422 else if (vi->big_packets)
423 err = add_recvbuf_big(vi, gfp);
424 else
425 err = add_recvbuf_small(vi, gfp);
366 426
367 err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
368 if (err < 0) { 427 if (err < 0) {
369 skb_unlink(skb, &vi->recv); 428 oom = true;
370 kfree_skb(skb);
371 break; 429 break;
372 } 430 }
373 vi->num++; 431 ++vi->num;
374 } while (err > 0); 432 } while (err > 0);
375 if (unlikely(vi->num > vi->max)) 433 if (unlikely(vi->num > vi->max))
376 vi->max = vi->num; 434 vi->max = vi->num;
@@ -407,15 +465,14 @@ static void refill_work(struct work_struct *work)
407static int virtnet_poll(struct napi_struct *napi, int budget) 465static int virtnet_poll(struct napi_struct *napi, int budget)
408{ 466{
409 struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi); 467 struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
410 struct sk_buff *skb = NULL; 468 void *buf;
411 unsigned int len, received = 0; 469 unsigned int len, received = 0;
412 470
413again: 471again:
414 while (received < budget && 472 while (received < budget &&
415 (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) { 473 (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
416 __skb_unlink(skb, &vi->recv); 474 receive_buf(vi->dev, buf, len);
417 receive_skb(vi->dev, skb, len); 475 --vi->num;
418 vi->num--;
419 received++; 476 received++;
420 } 477 }
421 478
@@ -445,7 +502,6 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
445 502
446 while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) { 503 while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
447 pr_debug("Sent skb %p\n", skb); 504 pr_debug("Sent skb %p\n", skb);
448 __skb_unlink(skb, &vi->send);
449 vi->dev->stats.tx_bytes += skb->len; 505 vi->dev->stats.tx_bytes += skb->len;
450 vi->dev->stats.tx_packets++; 506 vi->dev->stats.tx_packets++;
451 tot_sgs += skb_vnet_hdr(skb)->num_sg; 507 tot_sgs += skb_vnet_hdr(skb)->num_sg;
@@ -495,9 +551,9 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
495 551
496 /* Encode metadata header at front. */ 552 /* Encode metadata header at front. */
497 if (vi->mergeable_rx_bufs) 553 if (vi->mergeable_rx_bufs)
498 sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr)); 554 sg_set_buf(sg, &hdr->mhdr, sizeof hdr->mhdr);
499 else 555 else
500 sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr)); 556 sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
501 557
502 hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; 558 hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
503 return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb); 559 return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
@@ -528,15 +584,6 @@ again:
528 } 584 }
529 vi->svq->vq_ops->kick(vi->svq); 585 vi->svq->vq_ops->kick(vi->svq);
530 586
531 /*
532 * Put new one in send queue. You'd expect we'd need this before
533 * xmit_skb calls add_buf(), since the callback can be triggered
534 * immediately after that. But since the callback just triggers
535 * another call back here, normal network xmit locking prevents the
536 * race.
537 */
538 __skb_queue_head(&vi->send, skb);
539
540 /* Don't wait up for transmitted skbs to be freed. */ 587 /* Don't wait up for transmitted skbs to be freed. */
541 skb_orphan(skb); 588 skb_orphan(skb);
542 nf_reset(skb); 589 nf_reset(skb);
@@ -674,6 +721,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
674 struct virtio_net_ctrl_mac *mac_data; 721 struct virtio_net_ctrl_mac *mac_data;
675 struct dev_addr_list *addr; 722 struct dev_addr_list *addr;
676 struct netdev_hw_addr *ha; 723 struct netdev_hw_addr *ha;
724 int uc_count;
725 int mc_count;
677 void *buf; 726 void *buf;
678 int i; 727 int i;
679 728
@@ -700,9 +749,12 @@ static void virtnet_set_rx_mode(struct net_device *dev)
700 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", 749 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
701 allmulti ? "en" : "dis"); 750 allmulti ? "en" : "dis");
702 751
752 uc_count = netdev_uc_count(dev);
753 mc_count = netdev_mc_count(dev);
703 /* MAC filter - use one buffer for both lists */ 754 /* MAC filter - use one buffer for both lists */
704 mac_data = buf = kzalloc(((dev->uc.count + dev->mc_count) * ETH_ALEN) + 755 buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
705 (2 * sizeof(mac_data->entries)), GFP_ATOMIC); 756 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
757 mac_data = buf;
706 if (!buf) { 758 if (!buf) {
707 dev_warn(&dev->dev, "No memory for MAC address buffer\n"); 759 dev_warn(&dev->dev, "No memory for MAC address buffer\n");
708 return; 760 return;
@@ -711,24 +763,24 @@ static void virtnet_set_rx_mode(struct net_device *dev)
711 sg_init_table(sg, 2); 763 sg_init_table(sg, 2);
712 764
713 /* Store the unicast list and count in the front of the buffer */ 765 /* Store the unicast list and count in the front of the buffer */
714 mac_data->entries = dev->uc.count; 766 mac_data->entries = uc_count;
715 i = 0; 767 i = 0;
716 list_for_each_entry(ha, &dev->uc.list, list) 768 netdev_for_each_uc_addr(ha, dev)
717 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 769 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
718 770
719 sg_set_buf(&sg[0], mac_data, 771 sg_set_buf(&sg[0], mac_data,
720 sizeof(mac_data->entries) + (dev->uc.count * ETH_ALEN)); 772 sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
721 773
722 /* multicast list and count fill the end */ 774 /* multicast list and count fill the end */
723 mac_data = (void *)&mac_data->macs[dev->uc.count][0]; 775 mac_data = (void *)&mac_data->macs[uc_count][0];
724 776
725 mac_data->entries = dev->mc_count; 777 mac_data->entries = mc_count;
726 addr = dev->mc_list; 778 i = 0;
727 for (i = 0; i < dev->mc_count; i++, addr = addr->next) 779 netdev_for_each_mc_addr(addr, dev)
728 memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN); 780 memcpy(&mac_data->macs[i++][0], addr->da_addr, ETH_ALEN);
729 781
730 sg_set_buf(&sg[1], mac_data, 782 sg_set_buf(&sg[1], mac_data,
731 sizeof(mac_data->entries) + (dev->mc_count * ETH_ALEN)); 783 sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
732 784
733 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 785 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
734 VIRTIO_NET_CTRL_MAC_TABLE_SET, 786 VIRTIO_NET_CTRL_MAC_TABLE_SET,
@@ -915,10 +967,6 @@ static int virtnet_probe(struct virtio_device *vdev)
915 dev->features |= NETIF_F_HW_VLAN_FILTER; 967 dev->features |= NETIF_F_HW_VLAN_FILTER;
916 } 968 }
917 969
918 /* Initialize our empty receive and send queues. */
919 skb_queue_head_init(&vi->recv);
920 skb_queue_head_init(&vi->send);
921
922 err = register_netdev(dev); 970 err = register_netdev(dev);
923 if (err) { 971 if (err) {
924 pr_debug("virtio_net: registering device failed\n"); 972 pr_debug("virtio_net: registering device failed\n");
@@ -951,26 +999,42 @@ free:
951 return err; 999 return err;
952} 1000}
953 1001
1002static void free_unused_bufs(struct virtnet_info *vi)
1003{
1004 void *buf;
1005 while (1) {
1006 buf = vi->svq->vq_ops->detach_unused_buf(vi->svq);
1007 if (!buf)
1008 break;
1009 dev_kfree_skb(buf);
1010 }
1011 while (1) {
1012 buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq);
1013 if (!buf)
1014 break;
1015 if (vi->mergeable_rx_bufs || vi->big_packets)
1016 give_pages(vi, buf);
1017 else
1018 dev_kfree_skb(buf);
1019 --vi->num;
1020 }
1021 BUG_ON(vi->num != 0);
1022}
1023
954static void __devexit virtnet_remove(struct virtio_device *vdev) 1024static void __devexit virtnet_remove(struct virtio_device *vdev)
955{ 1025{
956 struct virtnet_info *vi = vdev->priv; 1026 struct virtnet_info *vi = vdev->priv;
957 struct sk_buff *skb;
958 1027
959 /* Stop all the virtqueues. */ 1028 /* Stop all the virtqueues. */
960 vdev->config->reset(vdev); 1029 vdev->config->reset(vdev);
961 1030
962 /* Free our skbs in send and recv queues, if any. */
963 while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
964 kfree_skb(skb);
965 vi->num--;
966 }
967 __skb_queue_purge(&vi->send);
968
969 BUG_ON(vi->num != 0);
970 1031
971 unregister_netdev(vi->dev); 1032 unregister_netdev(vi->dev);
972 cancel_delayed_work_sync(&vi->refill); 1033 cancel_delayed_work_sync(&vi->refill);
973 1034
1035 /* Free unused buffers in both send and recv, if any. */
1036 free_unused_bufs(vi);
1037
974 vdev->config->del_vqs(vi->vdev); 1038 vdev->config->del_vqs(vi->vdev);
975 1039
976 while (vi->pages) 1040 while (vi->pages)
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 9cc438282d77..cff3485d9673 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -35,7 +35,7 @@ char vmxnet3_driver_name[] = "vmxnet3";
35 * PCI Device ID Table 35 * PCI Device ID Table
36 * Last entry must be all 0s 36 * Last entry must be all 0s
37 */ 37 */
38static const struct pci_device_id vmxnet3_pciid_table[] = { 38static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
39 {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)}, 39 {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
40 {0} 40 {0}
41}; 41};
@@ -1668,22 +1668,19 @@ static u8 *
1668vmxnet3_copy_mc(struct net_device *netdev) 1668vmxnet3_copy_mc(struct net_device *netdev)
1669{ 1669{
1670 u8 *buf = NULL; 1670 u8 *buf = NULL;
1671 u32 sz = netdev->mc_count * ETH_ALEN; 1671 u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
1672 1672
1673 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */ 1673 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
1674 if (sz <= 0xffff) { 1674 if (sz <= 0xffff) {
1675 /* We may be called with BH disabled */ 1675 /* We may be called with BH disabled */
1676 buf = kmalloc(sz, GFP_ATOMIC); 1676 buf = kmalloc(sz, GFP_ATOMIC);
1677 if (buf) { 1677 if (buf) {
1678 int i; 1678 struct dev_mc_list *mc;
1679 struct dev_mc_list *mc = netdev->mc_list; 1679 int i = 0;
1680 1680
1681 for (i = 0; i < netdev->mc_count; i++) { 1681 netdev_for_each_mc_addr(mc, netdev)
1682 BUG_ON(!mc); 1682 memcpy(buf + i++ * ETH_ALEN, mc->dmi_addr,
1683 memcpy(buf + i * ETH_ALEN, mc->dmi_addr,
1684 ETH_ALEN); 1683 ETH_ALEN);
1685 mc = mc->next;
1686 }
1687 } 1684 }
1688 } 1685 }
1689 return buf; 1686 return buf;
@@ -1708,12 +1705,12 @@ vmxnet3_set_mc(struct net_device *netdev)
1708 if (netdev->flags & IFF_ALLMULTI) 1705 if (netdev->flags & IFF_ALLMULTI)
1709 new_mode |= VMXNET3_RXM_ALL_MULTI; 1706 new_mode |= VMXNET3_RXM_ALL_MULTI;
1710 else 1707 else
1711 if (netdev->mc_count > 0) { 1708 if (!netdev_mc_empty(netdev)) {
1712 new_table = vmxnet3_copy_mc(netdev); 1709 new_table = vmxnet3_copy_mc(netdev);
1713 if (new_table) { 1710 if (new_table) {
1714 new_mode |= VMXNET3_RXM_MCAST; 1711 new_mode |= VMXNET3_RXM_MCAST;
1715 rxConf->mfTableLen = cpu_to_le16( 1712 rxConf->mfTableLen = cpu_to_le16(
1716 netdev->mc_count * ETH_ALEN); 1713 netdev_mc_count(netdev) * ETH_ALEN);
1717 rxConf->mfTablePA = cpu_to_le64(virt_to_phys( 1714 rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
1718 new_table)); 1715 new_table));
1719 } else { 1716 } else {
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index b9685e82f7b6..46a7c9e689ec 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -54,7 +54,7 @@ MODULE_LICENSE("Dual BSD/GPL");
54MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O" 54MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O"
55 "Virtualized Server Adapter"); 55 "Virtualized Server Adapter");
56 56
57static struct pci_device_id vxge_id_table[] __devinitdata = { 57static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
58 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID, 58 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
59 PCI_ANY_ID}, 59 PCI_ANY_ID},
60 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID, 60 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
@@ -1178,11 +1178,11 @@ static void vxge_set_multicast(struct net_device *dev)
1178 1178
1179 memset(&mac_info, 0, sizeof(struct macInfo)); 1179 memset(&mac_info, 0, sizeof(struct macInfo));
1180 /* Update individual M_CAST address list */ 1180 /* Update individual M_CAST address list */
1181 if ((!vdev->all_multi_flg) && dev->mc_count) { 1181 if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
1182 1182
1183 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt; 1183 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1184 list_head = &vdev->vpaths[0].mac_addr_list; 1184 list_head = &vdev->vpaths[0].mac_addr_list;
1185 if ((dev->mc_count + 1185 if ((netdev_mc_count(dev) +
1186 (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) > 1186 (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
1187 vdev->vpaths[0].max_mac_addr_cnt) 1187 vdev->vpaths[0].max_mac_addr_cnt)
1188 goto _set_all_mcast; 1188 goto _set_all_mcast;
@@ -1217,9 +1217,7 @@ static void vxge_set_multicast(struct net_device *dev)
1217 } 1217 }
1218 1218
1219 /* Add new ones */ 1219 /* Add new ones */
1220 for (i = 0, mclist = dev->mc_list; i < dev->mc_count; 1220 netdev_for_each_mc_addr(mclist, dev) {
1221 i++, mclist = mclist->next) {
1222
1223 memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN); 1221 memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN);
1224 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; 1222 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1225 vpath_idx++) { 1223 vpath_idx++) {
@@ -4297,10 +4295,8 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4297 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter", 4295 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
4298 vdev->ndev->name, ll_config.device_hw_info.product_desc); 4296 vdev->ndev->name, ll_config.device_hw_info.product_desc);
4299 4297
4300 vxge_debug_init(VXGE_TRACE, 4298 vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
4301 "%s: MAC ADDR: %02X:%02X:%02X:%02X:%02X:%02X", 4299 vdev->ndev->name, macaddr);
4302 vdev->ndev->name, macaddr[0], macaddr[1], macaddr[2],
4303 macaddr[3], macaddr[4], macaddr[5]);
4304 4300
4305 vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d", 4301 vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
4306 vdev->ndev->name, vxge_hw_device_link_width_get(hldev)); 4302 vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 3f759daf3ca4..f88c07c13197 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -2050,7 +2050,7 @@ static int __init dscc4_setup(char *str)
2050__setup("dscc4.setup=", dscc4_setup); 2050__setup("dscc4.setup=", dscc4_setup);
2051#endif 2051#endif
2052 2052
2053static struct pci_device_id dscc4_pci_tbl[] = { 2053static DEFINE_PCI_DEVICE_TABLE(dscc4_pci_tbl) = {
2054 { PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4, 2054 { PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4,
2055 PCI_ANY_ID, PCI_ANY_ID, }, 2055 PCI_ANY_ID, PCI_ANY_ID, },
2056 { 0,} 2056 { 0,}
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 9bc2e3649157..40d724a8e020 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -528,7 +528,7 @@ static int fst_debug_mask = { FST_DEBUG };
528/* 528/*
529 * PCI ID lookup table 529 * PCI ID lookup table
530 */ 530 */
531static struct pci_device_id fst_pci_dev_id[] __devinitdata = { 531static DEFINE_PCI_DEVICE_TABLE(fst_pci_dev_id) = {
532 {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID, 532 {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID,
533 PCI_ANY_ID, 0, 0, FST_TYPE_T2P}, 533 PCI_ANY_ID, 0, 0, FST_TYPE_T2P},
534 534
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 4b6f27e7c820..b27850377121 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -77,7 +77,7 @@
77 77
78static int LMC_PKT_BUF_SZ = 1542; 78static int LMC_PKT_BUF_SZ = 1542;
79 79
80static struct pci_device_id lmc_pci_tbl[] = { 80static DEFINE_PCI_DEVICE_TABLE(lmc_pci_tbl) = {
81 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST, 81 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
82 PCI_VENDOR_ID_LMC, PCI_ANY_ID }, 82 PCI_VENDOR_ID_LMC, PCI_ANY_ID },
83 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST, 83 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index aec4d3955420..f4f1c00d0d23 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -251,7 +251,7 @@ static char rcsid[] =
251#undef PC300_DEBUG_RX 251#undef PC300_DEBUG_RX
252#undef PC300_DEBUG_OTHER 252#undef PC300_DEBUG_OTHER
253 253
254static struct pci_device_id cpc_pci_dev_id[] __devinitdata = { 254static DEFINE_PCI_DEVICE_TABLE(cpc_pci_dev_id) = {
255 /* PC300/RSV or PC300/X21, 2 chan */ 255 /* PC300/RSV or PC300/X21, 2 chan */
256 {0x120e, 0x300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x300}, 256 {0x120e, 0x300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x300},
257 /* PC300/RSV or PC300/X21, 1 chan */ 257 /* PC300/RSV or PC300/X21, 1 chan */
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index 60ece54bdd94..c7ab3becd261 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -481,7 +481,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
481 481
482 482
483 483
484static struct pci_device_id pc300_pci_tbl[] __devinitdata = { 484static DEFINE_PCI_DEVICE_TABLE(pc300_pci_tbl) = {
485 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_1, PCI_ANY_ID, 485 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_1, PCI_ANY_ID,
486 PCI_ANY_ID, 0, 0, 0 }, 486 PCI_ANY_ID, 0, 0, 0 },
487 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_2, PCI_ANY_ID, 487 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_2, PCI_ANY_ID,
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index f1340faaf022..e2cff64a446a 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -417,7 +417,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
417 417
418 418
419 419
420static struct pci_device_id pci200_pci_tbl[] __devinitdata = { 420static DEFINE_PCI_DEVICE_TABLE(pci200_pci_tbl) = {
421 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX, 421 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
422 PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 }, 422 PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 },
423 { 0, } 423 { 0, }
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index daee8a0624ee..541c700dceef 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -814,7 +814,7 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
814 return 0; 814 return 0;
815} 815}
816 816
817static struct pci_device_id wanxl_pci_tbl[] __devinitdata = { 817static DEFINE_PCI_DEVICE_TABLE(wanxl_pci_tbl) = {
818 { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID, 818 { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID,
819 PCI_ANY_ID, 0, 0, 0 }, 819 PCI_ANY_ID, 0, 0, 0 },
820 { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID, 820 { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID,
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 96a615fe09de..6cead321bc15 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -301,24 +301,15 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
301 /* Extract MAC addresss */ 301 /* Extract MAC addresss */
302 ddi = (void *) skb->data; 302 ddi = (void *) skb->data;
303 BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address)); 303 BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address));
304 d_printf(2, dev, "GET DEVICE INFO: mac addr " 304 d_printf(2, dev, "GET DEVICE INFO: mac addr %pM\n",
305 "%02x:%02x:%02x:%02x:%02x:%02x\n", 305 ddi->mac_address);
306 ddi->mac_address[0], ddi->mac_address[1],
307 ddi->mac_address[2], ddi->mac_address[3],
308 ddi->mac_address[4], ddi->mac_address[5]);
309 if (!memcmp(net_dev->perm_addr, ddi->mac_address, 306 if (!memcmp(net_dev->perm_addr, ddi->mac_address,
310 sizeof(ddi->mac_address))) 307 sizeof(ddi->mac_address)))
311 goto ok; 308 goto ok;
312 dev_warn(dev, "warning: device reports a different MAC address " 309 dev_warn(dev, "warning: device reports a different MAC address "
313 "to that of boot mode's\n"); 310 "to that of boot mode's\n");
314 dev_warn(dev, "device reports %02x:%02x:%02x:%02x:%02x:%02x\n", 311 dev_warn(dev, "device reports %pM\n", ddi->mac_address);
315 ddi->mac_address[0], ddi->mac_address[1], 312 dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr);
316 ddi->mac_address[2], ddi->mac_address[3],
317 ddi->mac_address[4], ddi->mac_address[5]);
318 dev_warn(dev, "boot mode reported %02x:%02x:%02x:%02x:%02x:%02x\n",
319 net_dev->perm_addr[0], net_dev->perm_addr[1],
320 net_dev->perm_addr[2], net_dev->perm_addr[3],
321 net_dev->perm_addr[4], net_dev->perm_addr[5]);
322 if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac))) 313 if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac)))
323 dev_err(dev, "device reports an invalid MAC address, " 314 dev_err(dev, "device reports an invalid MAC address, "
324 "not updating\n"); 315 "not updating\n");
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 64cdfeb299ca..e803a7dc6502 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -1041,21 +1041,14 @@ int i2400m_read_mac_addr(struct i2400m *i2400m)
1041 dev_err(dev, "BM: read mac addr failed: %d\n", result); 1041 dev_err(dev, "BM: read mac addr failed: %d\n", result);
1042 goto error_read_mac; 1042 goto error_read_mac;
1043 } 1043 }
1044 d_printf(2, dev, 1044 d_printf(2, dev, "mac addr is %pM\n", ack_buf.ack_pl);
1045 "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n",
1046 ack_buf.ack_pl[0], ack_buf.ack_pl[1],
1047 ack_buf.ack_pl[2], ack_buf.ack_pl[3],
1048 ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
1049 if (i2400m->bus_bm_mac_addr_impaired == 1) { 1045 if (i2400m->bus_bm_mac_addr_impaired == 1) {
1050 ack_buf.ack_pl[0] = 0x00; 1046 ack_buf.ack_pl[0] = 0x00;
1051 ack_buf.ack_pl[1] = 0x16; 1047 ack_buf.ack_pl[1] = 0x16;
1052 ack_buf.ack_pl[2] = 0xd3; 1048 ack_buf.ack_pl[2] = 0xd3;
1053 get_random_bytes(&ack_buf.ack_pl[3], 3); 1049 get_random_bytes(&ack_buf.ack_pl[3], 3);
1054 dev_err(dev, "BM is MAC addr impaired, faking MAC addr to " 1050 dev_err(dev, "BM is MAC addr impaired, faking MAC addr to "
1055 "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n", 1051 "mac addr is %pM\n", ack_buf.ack_pl);
1056 ack_buf.ack_pl[0], ack_buf.ack_pl[1],
1057 ack_buf.ack_pl[2], ack_buf.ack_pl[3],
1058 ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
1059 result = 0; 1052 result = 0;
1060 } 1053 }
1061 net_dev->addr_len = ETH_ALEN; 1054 net_dev->addr_len = ETH_ALEN;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 56dd6650c97a..588943660755 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -112,6 +112,7 @@ config AIRO_CS
112 depends on PCMCIA && (BROKEN || !M32R) 112 depends on PCMCIA && (BROKEN || !M32R)
113 select WIRELESS_EXT 113 select WIRELESS_EXT
114 select WEXT_SPY 114 select WEXT_SPY
115 select WEXT_PRIV
115 select CRYPTO 116 select CRYPTO
116 select CRYPTO_AES 117 select CRYPTO_AES
117 ---help--- 118 ---help---
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 39410016b4ff..547912e6843f 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -39,7 +39,7 @@ static unsigned int rx_ring_size __read_mostly = 16;
39module_param(tx_ring_size, uint, 0); 39module_param(tx_ring_size, uint, 0);
40module_param(rx_ring_size, uint, 0); 40module_param(rx_ring_size, uint, 0);
41 41
42static struct pci_device_id adm8211_pci_id_table[] __devinitdata = { 42static DEFINE_PCI_DEVICE_TABLE(adm8211_pci_id_table) = {
43 /* ADMtek ADM8211 */ 43 /* ADMtek ADM8211 */
44 { PCI_DEVICE(0x10B7, 0x6000) }, /* 3Com 3CRSHPW796 */ 44 { PCI_DEVICE(0x10B7, 0x6000) }, /* 3Com 3CRSHPW796 */
45 { PCI_DEVICE(0x1200, 0x8201) }, /* ? */ 45 { PCI_DEVICE(0x1200, 0x8201) }, /* ? */
@@ -302,18 +302,6 @@ static int adm8211_get_stats(struct ieee80211_hw *dev,
302 return 0; 302 return 0;
303} 303}
304 304
305static int adm8211_get_tx_stats(struct ieee80211_hw *dev,
306 struct ieee80211_tx_queue_stats *stats)
307{
308 struct adm8211_priv *priv = dev->priv;
309
310 stats[0].len = priv->cur_tx - priv->dirty_tx;
311 stats[0].limit = priv->tx_ring_size - 2;
312 stats[0].count = priv->dirty_tx;
313
314 return 0;
315}
316
317static void adm8211_interrupt_tci(struct ieee80211_hw *dev) 305static void adm8211_interrupt_tci(struct ieee80211_hw *dev)
318{ 306{
319 struct adm8211_priv *priv = dev->priv; 307 struct adm8211_priv *priv = dev->priv;
@@ -1400,15 +1388,15 @@ static void adm8211_configure_filter(struct ieee80211_hw *dev,
1400} 1388}
1401 1389
1402static int adm8211_add_interface(struct ieee80211_hw *dev, 1390static int adm8211_add_interface(struct ieee80211_hw *dev,
1403 struct ieee80211_if_init_conf *conf) 1391 struct ieee80211_vif *vif)
1404{ 1392{
1405 struct adm8211_priv *priv = dev->priv; 1393 struct adm8211_priv *priv = dev->priv;
1406 if (priv->mode != NL80211_IFTYPE_MONITOR) 1394 if (priv->mode != NL80211_IFTYPE_MONITOR)
1407 return -EOPNOTSUPP; 1395 return -EOPNOTSUPP;
1408 1396
1409 switch (conf->type) { 1397 switch (vif->type) {
1410 case NL80211_IFTYPE_STATION: 1398 case NL80211_IFTYPE_STATION:
1411 priv->mode = conf->type; 1399 priv->mode = vif->type;
1412 break; 1400 break;
1413 default: 1401 default:
1414 return -EOPNOTSUPP; 1402 return -EOPNOTSUPP;
@@ -1416,8 +1404,8 @@ static int adm8211_add_interface(struct ieee80211_hw *dev,
1416 1404
1417 ADM8211_IDLE(); 1405 ADM8211_IDLE();
1418 1406
1419 ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)conf->mac_addr)); 1407 ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)vif->addr));
1420 ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(conf->mac_addr + 4))); 1408 ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(vif->addr + 4)));
1421 1409
1422 adm8211_update_mode(dev); 1410 adm8211_update_mode(dev);
1423 1411
@@ -1427,7 +1415,7 @@ static int adm8211_add_interface(struct ieee80211_hw *dev,
1427} 1415}
1428 1416
1429static void adm8211_remove_interface(struct ieee80211_hw *dev, 1417static void adm8211_remove_interface(struct ieee80211_hw *dev,
1430 struct ieee80211_if_init_conf *conf) 1418 struct ieee80211_vif *vif)
1431{ 1419{
1432 struct adm8211_priv *priv = dev->priv; 1420 struct adm8211_priv *priv = dev->priv;
1433 priv->mode = NL80211_IFTYPE_MONITOR; 1421 priv->mode = NL80211_IFTYPE_MONITOR;
@@ -1773,7 +1761,6 @@ static const struct ieee80211_ops adm8211_ops = {
1773 .prepare_multicast = adm8211_prepare_multicast, 1761 .prepare_multicast = adm8211_prepare_multicast,
1774 .configure_filter = adm8211_configure_filter, 1762 .configure_filter = adm8211_configure_filter,
1775 .get_stats = adm8211_get_stats, 1763 .get_stats = adm8211_get_stats,
1776 .get_tx_stats = adm8211_get_tx_stats,
1777 .get_tsf = adm8211_get_tsft 1764 .get_tsf = adm8211_get_tsft
1778}; 1765};
1779 1766
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 4331d675fcc6..698d5672a070 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -51,13 +51,14 @@
51#include <linux/freezer.h> 51#include <linux/freezer.h>
52 52
53#include <linux/ieee80211.h> 53#include <linux/ieee80211.h>
54#include <net/iw_handler.h>
54 55
55#include "airo.h" 56#include "airo.h"
56 57
57#define DRV_NAME "airo" 58#define DRV_NAME "airo"
58 59
59#ifdef CONFIG_PCI 60#ifdef CONFIG_PCI
60static struct pci_device_id card_ids[] = { 61static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
61 { 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, }, 62 { 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, },
62 { 0x14b9, 0x4500, PCI_ANY_ID, PCI_ANY_ID }, 63 { 0x14b9, 0x4500, PCI_ANY_ID, PCI_ANY_ID },
63 { 0x14b9, 0x4800, PCI_ANY_ID, PCI_ANY_ID, }, 64 { 0x14b9, 0x4800, PCI_ANY_ID, PCI_ANY_ID, },
@@ -2310,7 +2311,7 @@ static void airo_set_multicast_list(struct net_device *dev) {
2310 airo_set_promisc(ai); 2311 airo_set_promisc(ai);
2311 } 2312 }
2312 2313
2313 if ((dev->flags&IFF_ALLMULTI)||dev->mc_count>0) { 2314 if ((dev->flags&IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
2314 /* Turn on multicast. (Should be already setup...) */ 2315 /* Turn on multicast. (Should be already setup...) */
2315 } 2316 }
2316} 2317}
@@ -5254,11 +5255,7 @@ static int set_wep_key(struct airo_info *ai, u16 index, const char *key,
5254 WepKeyRid wkr; 5255 WepKeyRid wkr;
5255 int rc; 5256 int rc;
5256 5257
5257 if (keylen == 0) { 5258 WARN_ON(keylen == 0);
5258 airo_print_err(ai->dev->name, "%s: key length to set was zero",
5259 __func__);
5260 return -1;
5261 }
5262 5259
5263 memset(&wkr, 0, sizeof(wkr)); 5260 memset(&wkr, 0, sizeof(wkr));
5264 wkr.len = cpu_to_le16(sizeof(wkr)); 5261 wkr.len = cpu_to_le16(sizeof(wkr));
@@ -6405,11 +6402,7 @@ static int airo_set_encode(struct net_device *dev,
6405 if (dwrq->length > MIN_KEY_SIZE) 6402 if (dwrq->length > MIN_KEY_SIZE)
6406 key.len = MAX_KEY_SIZE; 6403 key.len = MAX_KEY_SIZE;
6407 else 6404 else
6408 if (dwrq->length > 0) 6405 key.len = MIN_KEY_SIZE;
6409 key.len = MIN_KEY_SIZE;
6410 else
6411 /* Disable the key */
6412 key.len = 0;
6413 /* Check if the key is not marked as invalid */ 6406 /* Check if the key is not marked as invalid */
6414 if(!(dwrq->flags & IW_ENCODE_NOKEY)) { 6407 if(!(dwrq->flags & IW_ENCODE_NOKEY)) {
6415 /* Cleanup */ 6408 /* Cleanup */
@@ -6590,12 +6583,22 @@ static int airo_set_encodeext(struct net_device *dev,
6590 default: 6583 default:
6591 return -EINVAL; 6584 return -EINVAL;
6592 } 6585 }
6593 /* Send the key to the card */ 6586 if (key.len == 0) {
6594 rc = set_wep_key(local, idx, key.key, key.len, perm, 1); 6587 rc = set_wep_tx_idx(local, idx, perm, 1);
6595 if (rc < 0) { 6588 if (rc < 0) {
6596 airo_print_err(local->dev->name, "failed to set WEP key" 6589 airo_print_err(local->dev->name,
6597 " at index %d: %d.", idx, rc); 6590 "failed to set WEP transmit index to %d: %d.",
6598 return rc; 6591 idx, rc);
6592 return rc;
6593 }
6594 } else {
6595 rc = set_wep_key(local, idx, key.key, key.len, perm, 1);
6596 if (rc < 0) {
6597 airo_print_err(local->dev->name,
6598 "failed to set WEP key at index %d: %d.",
6599 idx, rc);
6600 return rc;
6601 }
6599 } 6602 }
6600 } 6603 }
6601 6604
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 2517364d3ebe..0fb419936dff 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1789,7 +1789,7 @@ static void at76_mac80211_stop(struct ieee80211_hw *hw)
1789} 1789}
1790 1790
1791static int at76_add_interface(struct ieee80211_hw *hw, 1791static int at76_add_interface(struct ieee80211_hw *hw,
1792 struct ieee80211_if_init_conf *conf) 1792 struct ieee80211_vif *vif)
1793{ 1793{
1794 struct at76_priv *priv = hw->priv; 1794 struct at76_priv *priv = hw->priv;
1795 int ret = 0; 1795 int ret = 0;
@@ -1798,7 +1798,7 @@ static int at76_add_interface(struct ieee80211_hw *hw,
1798 1798
1799 mutex_lock(&priv->mtx); 1799 mutex_lock(&priv->mtx);
1800 1800
1801 switch (conf->type) { 1801 switch (vif->type) {
1802 case NL80211_IFTYPE_STATION: 1802 case NL80211_IFTYPE_STATION:
1803 priv->iw_mode = IW_MODE_INFRA; 1803 priv->iw_mode = IW_MODE_INFRA;
1804 break; 1804 break;
@@ -1814,7 +1814,7 @@ exit:
1814} 1814}
1815 1815
1816static void at76_remove_interface(struct ieee80211_hw *hw, 1816static void at76_remove_interface(struct ieee80211_hw *hw,
1817 struct ieee80211_if_init_conf *conf) 1817 struct ieee80211_vif *vif)
1818{ 1818{
1819 at76_dbg(DBG_MAC80211, "%s()", __func__); 1819 at76_dbg(DBG_MAC80211, "%s()", __func__);
1820} 1820}
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index 9f9459860d82..8c8ce67971e9 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -109,7 +109,6 @@ struct ar9170_rxstream_mpdu_merge {
109 bool has_plcp; 109 bool has_plcp;
110}; 110};
111 111
112#define AR9170_NUM_MAX_BA_RETRY 5
113#define AR9170_NUM_TID 16 112#define AR9170_NUM_TID 16
114#define WME_BA_BMP_SIZE 64 113#define WME_BA_BMP_SIZE 64
115#define AR9170_NUM_MAX_AGG_LEN (2 * WME_BA_BMP_SIZE) 114#define AR9170_NUM_MAX_AGG_LEN (2 * WME_BA_BMP_SIZE)
@@ -143,7 +142,12 @@ struct ar9170_sta_tid {
143 u16 tid; 142 u16 tid;
144 enum ar9170_tid_state state; 143 enum ar9170_tid_state state;
145 bool active; 144 bool active;
146 u8 retry; 145};
146
147struct ar9170_tx_queue_stats {
148 unsigned int len;
149 unsigned int limit;
150 unsigned int count;
147}; 151};
148 152
149#define AR9170_QUEUE_TIMEOUT 64 153#define AR9170_QUEUE_TIMEOUT 64
@@ -154,6 +158,8 @@ struct ar9170_sta_tid {
154 158
155#define AR9170_NUM_TX_STATUS 128 159#define AR9170_NUM_TX_STATUS 128
156#define AR9170_NUM_TX_AGG_MAX 30 160#define AR9170_NUM_TX_AGG_MAX 30
161#define AR9170_NUM_TX_LIMIT_HARD AR9170_TXQ_DEPTH
162#define AR9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH - 10)
157 163
158struct ar9170 { 164struct ar9170 {
159 struct ieee80211_hw *hw; 165 struct ieee80211_hw *hw;
@@ -211,7 +217,7 @@ struct ar9170 {
211 217
212 /* qos queue settings */ 218 /* qos queue settings */
213 spinlock_t tx_stats_lock; 219 spinlock_t tx_stats_lock;
214 struct ieee80211_tx_queue_stats tx_stats[5]; 220 struct ar9170_tx_queue_stats tx_stats[5];
215 struct ieee80211_tx_queue_params edcf[5]; 221 struct ieee80211_tx_queue_params edcf[5];
216 222
217 spinlock_t cmdlock; 223 spinlock_t cmdlock;
@@ -248,13 +254,8 @@ struct ar9170_sta_info {
248 unsigned int ampdu_max_len; 254 unsigned int ampdu_max_len;
249}; 255};
250 256
251#define AR9170_TX_FLAG_WAIT_FOR_ACK BIT(0)
252#define AR9170_TX_FLAG_NO_ACK BIT(1)
253#define AR9170_TX_FLAG_BLOCK_ACK BIT(2)
254
255struct ar9170_tx_info { 257struct ar9170_tx_info {
256 unsigned long timeout; 258 unsigned long timeout;
257 unsigned int flags;
258}; 259};
259 260
260#define IS_STARTED(a) (((struct ar9170 *)a)->state >= AR9170_STARTED) 261#define IS_STARTED(a) (((struct ar9170 *)a)->state >= AR9170_STARTED)
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 701ddb7d8400..0a1d4c28e68a 100644
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -276,6 +276,7 @@ struct ar9170_tx_control {
276#define AR9170_TX_MAC_RATE_PROBE 0x8000 276#define AR9170_TX_MAC_RATE_PROBE 0x8000
277 277
278/* either-or */ 278/* either-or */
279#define AR9170_TX_PHY_MOD_MASK 0x00000003
279#define AR9170_TX_PHY_MOD_CCK 0x00000000 280#define AR9170_TX_PHY_MOD_CCK 0x00000000
280#define AR9170_TX_PHY_MOD_OFDM 0x00000001 281#define AR9170_TX_PHY_MOD_OFDM 0x00000001
281#define AR9170_TX_PHY_MOD_HT 0x00000002 282#define AR9170_TX_PHY_MOD_HT 0x00000002
diff --git a/drivers/net/wireless/ath/ar9170/mac.c b/drivers/net/wireless/ath/ar9170/mac.c
index ddc8c09dc79e..857e86104295 100644
--- a/drivers/net/wireless/ath/ar9170/mac.c
+++ b/drivers/net/wireless/ath/ar9170/mac.c
@@ -117,7 +117,7 @@ int ar9170_set_qos(struct ar9170 *ar)
117 ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP, 117 ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP,
118 ar->edcf[0].txop | ar->edcf[1].txop << 16); 118 ar->edcf[0].txop | ar->edcf[1].txop << 16);
119 ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP, 119 ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP,
120 ar->edcf[1].txop | ar->edcf[3].txop << 16); 120 ar->edcf[2].txop | ar->edcf[3].txop << 16);
121 121
122 ar9170_regwrite_finish(); 122 ar9170_regwrite_finish();
123 123
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index f9d6db8d013e..8a964f130367 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -194,12 +194,15 @@ static inline u16 ar9170_get_seq(struct sk_buff *skb)
194 return ar9170_get_seq_h((void *) txc->frame_data); 194 return ar9170_get_seq_h((void *) txc->frame_data);
195} 195}
196 196
197static inline u16 ar9170_get_tid_h(struct ieee80211_hdr *hdr)
198{
199 return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
200}
201
197static inline u16 ar9170_get_tid(struct sk_buff *skb) 202static inline u16 ar9170_get_tid(struct sk_buff *skb)
198{ 203{
199 struct ar9170_tx_control *txc = (void *) skb->data; 204 struct ar9170_tx_control *txc = (void *) skb->data;
200 struct ieee80211_hdr *hdr = (void *) txc->frame_data; 205 return ar9170_get_tid_h((struct ieee80211_hdr *) txc->frame_data);
201
202 return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
203} 206}
204 207
205#define GET_NEXT_SEQ(seq) ((seq + 1) & 0x0fff) 208#define GET_NEXT_SEQ(seq) ((seq + 1) & 0x0fff)
@@ -213,10 +216,10 @@ static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
213 struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data; 216 struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
214 struct ieee80211_hdr *hdr = (void *) txc->frame_data; 217 struct ieee80211_hdr *hdr = (void *) txc->frame_data;
215 218
216 printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] flags:%x s:%d " 219 printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] s:%d "
217 "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n", 220 "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n",
218 wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb), 221 wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb),
219 ieee80211_get_DA(hdr), arinfo->flags, ar9170_get_seq_h(hdr), 222 ieee80211_get_DA(hdr), ar9170_get_seq_h(hdr),
220 le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control), 223 le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control),
221 jiffies_to_msecs(arinfo->timeout - jiffies)); 224 jiffies_to_msecs(arinfo->timeout - jiffies));
222} 225}
@@ -430,7 +433,7 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
430 spin_lock_irqsave(&ar->tx_stats_lock, flags); 433 spin_lock_irqsave(&ar->tx_stats_lock, flags);
431 ar->tx_stats[queue].len--; 434 ar->tx_stats[queue].len--;
432 435
433 if (skb_queue_empty(&ar->tx_pending[queue])) { 436 if (ar->tx_stats[queue].len < AR9170_NUM_TX_LIMIT_SOFT) {
434#ifdef AR9170_QUEUE_STOP_DEBUG 437#ifdef AR9170_QUEUE_STOP_DEBUG
435 printk(KERN_DEBUG "%s: wake queue %d\n", 438 printk(KERN_DEBUG "%s: wake queue %d\n",
436 wiphy_name(ar->hw->wiphy), queue); 439 wiphy_name(ar->hw->wiphy), queue);
@@ -440,22 +443,17 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
440 } 443 }
441 spin_unlock_irqrestore(&ar->tx_stats_lock, flags); 444 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
442 445
443 if (arinfo->flags & AR9170_TX_FLAG_BLOCK_ACK) { 446 if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
444 ar9170_tx_ampdu_callback(ar, skb);
445 } else if (arinfo->flags & AR9170_TX_FLAG_WAIT_FOR_ACK) {
446 arinfo->timeout = jiffies +
447 msecs_to_jiffies(AR9170_TX_TIMEOUT);
448
449 skb_queue_tail(&ar->tx_status[queue], skb);
450 } else if (arinfo->flags & AR9170_TX_FLAG_NO_ACK) {
451 ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED); 447 ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
452 } else { 448 } else {
453#ifdef AR9170_QUEUE_DEBUG 449 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
454 printk(KERN_DEBUG "%s: unsupported frame flags!\n", 450 ar9170_tx_ampdu_callback(ar, skb);
455 wiphy_name(ar->hw->wiphy)); 451 } else {
456 ar9170_print_txheader(ar, skb); 452 arinfo->timeout = jiffies +
457#endif /* AR9170_QUEUE_DEBUG */ 453 msecs_to_jiffies(AR9170_TX_TIMEOUT);
458 dev_kfree_skb_any(skb); 454
455 skb_queue_tail(&ar->tx_status[queue], skb);
456 }
459 } 457 }
460 458
461 if (!ar->tx_stats[queue].len && 459 if (!ar->tx_stats[queue].len &&
@@ -1407,17 +1405,6 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
1407 1405
1408 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && 1406 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
1409 (is_valid_ether_addr(ieee80211_get_DA(hdr)))) { 1407 (is_valid_ether_addr(ieee80211_get_DA(hdr)))) {
1410 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1411 if (unlikely(!info->control.sta))
1412 goto err_out;
1413
1414 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
1415 arinfo->flags = AR9170_TX_FLAG_BLOCK_ACK;
1416
1417 goto out;
1418 }
1419
1420 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
1421 /* 1408 /*
1422 * WARNING: 1409 * WARNING:
1423 * Putting the QoS queue bits into an unexplored territory is 1410 * Putting the QoS queue bits into an unexplored territory is
@@ -1431,12 +1418,17 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
1431 1418
1432 txc->phy_control |= 1419 txc->phy_control |=
1433 cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT); 1420 cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
1434 arinfo->flags = AR9170_TX_FLAG_WAIT_FOR_ACK; 1421
1435 } else { 1422 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1436 arinfo->flags = AR9170_TX_FLAG_NO_ACK; 1423 if (unlikely(!info->control.sta))
1424 goto err_out;
1425
1426 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
1427 } else {
1428 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
1429 }
1437 } 1430 }
1438 1431
1439out:
1440 return 0; 1432 return 0;
1441 1433
1442err_out: 1434err_out:
@@ -1671,8 +1663,7 @@ static bool ar9170_tx_ampdu(struct ar9170 *ar)
1671 * tell the FW/HW that this is the last frame, 1663 * tell the FW/HW that this is the last frame,
1672 * that way it will wait for the immediate block ack. 1664 * that way it will wait for the immediate block ack.
1673 */ 1665 */
1674 if (likely(skb_peek_tail(&agg))) 1666 ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
1675 ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
1676 1667
1677#ifdef AR9170_TXAGG_DEBUG 1668#ifdef AR9170_TXAGG_DEBUG
1678 printk(KERN_DEBUG "%s: generated A-MPDU looks like this:\n", 1669 printk(KERN_DEBUG "%s: generated A-MPDU looks like this:\n",
@@ -1716,6 +1707,21 @@ static void ar9170_tx(struct ar9170 *ar)
1716 1707
1717 for (i = 0; i < __AR9170_NUM_TXQ; i++) { 1708 for (i = 0; i < __AR9170_NUM_TXQ; i++) {
1718 spin_lock_irqsave(&ar->tx_stats_lock, flags); 1709 spin_lock_irqsave(&ar->tx_stats_lock, flags);
1710 frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
1711 skb_queue_len(&ar->tx_pending[i]));
1712
1713 if (remaining_space < frames) {
1714#ifdef AR9170_QUEUE_DEBUG
1715 printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
1716 "remaining slots:%d, needed:%d\n",
1717 wiphy_name(ar->hw->wiphy), i, remaining_space,
1718 frames);
1719#endif /* AR9170_QUEUE_DEBUG */
1720 frames = remaining_space;
1721 }
1722
1723 ar->tx_stats[i].len += frames;
1724 ar->tx_stats[i].count += frames;
1719 if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) { 1725 if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
1720#ifdef AR9170_QUEUE_DEBUG 1726#ifdef AR9170_QUEUE_DEBUG
1721 printk(KERN_DEBUG "%s: queue %d full\n", 1727 printk(KERN_DEBUG "%s: queue %d full\n",
@@ -1733,25 +1739,8 @@ static void ar9170_tx(struct ar9170 *ar)
1733 __ar9170_dump_txstats(ar); 1739 __ar9170_dump_txstats(ar);
1734#endif /* AR9170_QUEUE_STOP_DEBUG */ 1740#endif /* AR9170_QUEUE_STOP_DEBUG */
1735 ieee80211_stop_queue(ar->hw, i); 1741 ieee80211_stop_queue(ar->hw, i);
1736 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
1737 continue;
1738 }
1739
1740 frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
1741 skb_queue_len(&ar->tx_pending[i]));
1742
1743 if (remaining_space < frames) {
1744#ifdef AR9170_QUEUE_DEBUG
1745 printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
1746 "remaining slots:%d, needed:%d\n",
1747 wiphy_name(ar->hw->wiphy), i, remaining_space,
1748 frames);
1749#endif /* AR9170_QUEUE_DEBUG */
1750 frames = remaining_space;
1751 } 1742 }
1752 1743
1753 ar->tx_stats[i].len += frames;
1754 ar->tx_stats[i].count += frames;
1755 spin_unlock_irqrestore(&ar->tx_stats_lock, flags); 1744 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
1756 1745
1757 if (!frames) 1746 if (!frames)
@@ -1773,7 +1762,7 @@ static void ar9170_tx(struct ar9170 *ar)
1773 arinfo->timeout = jiffies + 1762 arinfo->timeout = jiffies +
1774 msecs_to_jiffies(AR9170_TX_TIMEOUT); 1763 msecs_to_jiffies(AR9170_TX_TIMEOUT);
1775 1764
1776 if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK) 1765 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1777 atomic_inc(&ar->tx_ampdu_pending); 1766 atomic_inc(&ar->tx_ampdu_pending);
1778 1767
1779#ifdef AR9170_QUEUE_DEBUG 1768#ifdef AR9170_QUEUE_DEBUG
@@ -1784,7 +1773,7 @@ static void ar9170_tx(struct ar9170 *ar)
1784 1773
1785 err = ar->tx(ar, skb); 1774 err = ar->tx(ar, skb);
1786 if (unlikely(err)) { 1775 if (unlikely(err)) {
1787 if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK) 1776 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1788 atomic_dec(&ar->tx_ampdu_pending); 1777 atomic_dec(&ar->tx_ampdu_pending);
1789 1778
1790 frames_failed++; 1779 frames_failed++;
@@ -1950,7 +1939,7 @@ err_free:
1950} 1939}
1951 1940
1952static int ar9170_op_add_interface(struct ieee80211_hw *hw, 1941static int ar9170_op_add_interface(struct ieee80211_hw *hw,
1953 struct ieee80211_if_init_conf *conf) 1942 struct ieee80211_vif *vif)
1954{ 1943{
1955 struct ar9170 *ar = hw->priv; 1944 struct ar9170 *ar = hw->priv;
1956 struct ath_common *common = &ar->common; 1945 struct ath_common *common = &ar->common;
@@ -1963,8 +1952,8 @@ static int ar9170_op_add_interface(struct ieee80211_hw *hw,
1963 goto unlock; 1952 goto unlock;
1964 } 1953 }
1965 1954
1966 ar->vif = conf->vif; 1955 ar->vif = vif;
1967 memcpy(common->macaddr, conf->mac_addr, ETH_ALEN); 1956 memcpy(common->macaddr, vif->addr, ETH_ALEN);
1968 1957
1969 if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) { 1958 if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) {
1970 ar->rx_software_decryption = true; 1959 ar->rx_software_decryption = true;
@@ -1984,7 +1973,7 @@ unlock:
1984} 1973}
1985 1974
1986static void ar9170_op_remove_interface(struct ieee80211_hw *hw, 1975static void ar9170_op_remove_interface(struct ieee80211_hw *hw,
1987 struct ieee80211_if_init_conf *conf) 1976 struct ieee80211_vif *vif)
1988{ 1977{
1989 struct ar9170 *ar = hw->priv; 1978 struct ar9170 *ar = hw->priv;
1990 1979
@@ -2340,55 +2329,55 @@ out:
2340 return err; 2329 return err;
2341} 2330}
2342 2331
2343static void ar9170_sta_notify(struct ieee80211_hw *hw, 2332static int ar9170_sta_add(struct ieee80211_hw *hw,
2344 struct ieee80211_vif *vif, 2333 struct ieee80211_vif *vif,
2345 enum sta_notify_cmd cmd, 2334 struct ieee80211_sta *sta)
2346 struct ieee80211_sta *sta)
2347{ 2335{
2348 struct ar9170 *ar = hw->priv; 2336 struct ar9170 *ar = hw->priv;
2349 struct ar9170_sta_info *sta_info = (void *) sta->drv_priv; 2337 struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
2350 unsigned int i; 2338 unsigned int i;
2351 2339
2352 switch (cmd) { 2340 memset(sta_info, 0, sizeof(*sta_info));
2353 case STA_NOTIFY_ADD:
2354 memset(sta_info, 0, sizeof(*sta_info));
2355 2341
2356 if (!sta->ht_cap.ht_supported) 2342 if (!sta->ht_cap.ht_supported)
2357 break; 2343 return 0;
2358 2344
2359 if (sta->ht_cap.ampdu_density > ar->global_ampdu_density) 2345 if (sta->ht_cap.ampdu_density > ar->global_ampdu_density)
2360 ar->global_ampdu_density = sta->ht_cap.ampdu_density; 2346 ar->global_ampdu_density = sta->ht_cap.ampdu_density;
2361 2347
2362 if (sta->ht_cap.ampdu_factor < ar->global_ampdu_factor) 2348 if (sta->ht_cap.ampdu_factor < ar->global_ampdu_factor)
2363 ar->global_ampdu_factor = sta->ht_cap.ampdu_factor; 2349 ar->global_ampdu_factor = sta->ht_cap.ampdu_factor;
2364 2350
2365 for (i = 0; i < AR9170_NUM_TID; i++) { 2351 for (i = 0; i < AR9170_NUM_TID; i++) {
2366 sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN; 2352 sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN;
2367 sta_info->agg[i].active = false; 2353 sta_info->agg[i].active = false;
2368 sta_info->agg[i].ssn = 0; 2354 sta_info->agg[i].ssn = 0;
2369 sta_info->agg[i].retry = 0; 2355 sta_info->agg[i].tid = i;
2370 sta_info->agg[i].tid = i; 2356 INIT_LIST_HEAD(&sta_info->agg[i].list);
2371 INIT_LIST_HEAD(&sta_info->agg[i].list); 2357 skb_queue_head_init(&sta_info->agg[i].queue);
2372 skb_queue_head_init(&sta_info->agg[i].queue); 2358 }
2373 }
2374 2359
2375 sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor); 2360 sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
2376 break;
2377 2361
2378 case STA_NOTIFY_REMOVE: 2362 return 0;
2379 if (!sta->ht_cap.ht_supported) 2363}
2380 break;
2381 2364
2382 for (i = 0; i < AR9170_NUM_TID; i++) { 2365static int ar9170_sta_remove(struct ieee80211_hw *hw,
2383 sta_info->agg[i].state = AR9170_TID_STATE_INVALID; 2366 struct ieee80211_vif *vif,
2384 skb_queue_purge(&sta_info->agg[i].queue); 2367 struct ieee80211_sta *sta)
2385 } 2368{
2369 struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
2370 unsigned int i;
2386 2371
2387 break; 2372 if (!sta->ht_cap.ht_supported)
2373 return 0;
2388 2374
2389 default: 2375 for (i = 0; i < AR9170_NUM_TID; i++) {
2390 break; 2376 sta_info->agg[i].state = AR9170_TID_STATE_INVALID;
2377 skb_queue_purge(&sta_info->agg[i].queue);
2391 } 2378 }
2379
2380 return 0;
2392} 2381}
2393 2382
2394static int ar9170_get_stats(struct ieee80211_hw *hw, 2383static int ar9170_get_stats(struct ieee80211_hw *hw,
@@ -2408,18 +2397,6 @@ static int ar9170_get_stats(struct ieee80211_hw *hw,
2408 return 0; 2397 return 0;
2409} 2398}
2410 2399
2411static int ar9170_get_tx_stats(struct ieee80211_hw *hw,
2412 struct ieee80211_tx_queue_stats *tx_stats)
2413{
2414 struct ar9170 *ar = hw->priv;
2415
2416 spin_lock_bh(&ar->tx_stats_lock);
2417 memcpy(tx_stats, ar->tx_stats, sizeof(tx_stats[0]) * hw->queues);
2418 spin_unlock_bh(&ar->tx_stats_lock);
2419
2420 return 0;
2421}
2422
2423static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue, 2400static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
2424 const struct ieee80211_tx_queue_params *param) 2401 const struct ieee80211_tx_queue_params *param)
2425{ 2402{
@@ -2519,9 +2496,9 @@ static const struct ieee80211_ops ar9170_ops = {
2519 .bss_info_changed = ar9170_op_bss_info_changed, 2496 .bss_info_changed = ar9170_op_bss_info_changed,
2520 .get_tsf = ar9170_op_get_tsf, 2497 .get_tsf = ar9170_op_get_tsf,
2521 .set_key = ar9170_set_key, 2498 .set_key = ar9170_set_key,
2522 .sta_notify = ar9170_sta_notify, 2499 .sta_add = ar9170_sta_add,
2500 .sta_remove = ar9170_sta_remove,
2523 .get_stats = ar9170_get_stats, 2501 .get_stats = ar9170_get_stats,
2524 .get_tx_stats = ar9170_get_tx_stats,
2525 .ampdu_action = ar9170_ampdu_action, 2502 .ampdu_action = ar9170_ampdu_action,
2526}; 2503};
2527 2504
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index e0799d924057..0f361186b78f 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -84,6 +84,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
84 { USB_DEVICE(0x0cde, 0x0023) }, 84 { USB_DEVICE(0x0cde, 0x0023) },
85 /* Z-Com UB82 ABG */ 85 /* Z-Com UB82 ABG */
86 { USB_DEVICE(0x0cde, 0x0026) }, 86 { USB_DEVICE(0x0cde, 0x0026) },
87 /* Sphairon Homelink 1202 */
88 { USB_DEVICE(0x0cde, 0x0027) },
87 /* Arcadyan WN7512 */ 89 /* Arcadyan WN7512 */
88 { USB_DEVICE(0x083a, 0xf522) }, 90 { USB_DEVICE(0x083a, 0xf522) },
89 /* Planex GWUS300 */ 91 /* Planex GWUS300 */
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 9e05648356fe..71fc960814f0 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -74,7 +74,6 @@ struct ath_common;
74 74
75struct ath_bus_ops { 75struct ath_bus_ops {
76 void (*read_cachesize)(struct ath_common *common, int *csz); 76 void (*read_cachesize)(struct ath_common *common, int *csz);
77 void (*cleanup)(struct ath_common *common);
78 bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data); 77 bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
79 void (*bt_coex_prep)(struct ath_common *common); 78 void (*bt_coex_prep)(struct ath_common *common);
80}; 79};
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 6a2a96761111..ac67f02e26d8 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -535,13 +535,12 @@ struct ath5k_txq_info {
535 u32 tqi_cbr_period; /* Constant bit rate period */ 535 u32 tqi_cbr_period; /* Constant bit rate period */
536 u32 tqi_cbr_overflow_limit; 536 u32 tqi_cbr_overflow_limit;
537 u32 tqi_burst_time; 537 u32 tqi_burst_time;
538 u32 tqi_ready_time; /* Not used */ 538 u32 tqi_ready_time; /* Time queue waits after an event */
539}; 539};
540 540
541/* 541/*
542 * Transmit packet types. 542 * Transmit packet types.
543 * used on tx control descriptor 543 * used on tx control descriptor
544 * TODO: Use them inside base.c corectly
545 */ 544 */
546enum ath5k_pkt_type { 545enum ath5k_pkt_type {
547 AR5K_PKT_TYPE_NORMAL = 0, 546 AR5K_PKT_TYPE_NORMAL = 0,
@@ -1063,6 +1062,7 @@ struct ath5k_hw {
1063 u32 ah_cw_min; 1062 u32 ah_cw_min;
1064 u32 ah_cw_max; 1063 u32 ah_cw_max;
1065 u32 ah_limit_tx_retries; 1064 u32 ah_limit_tx_retries;
1065 u8 ah_coverage_class;
1066 1066
1067 /* Antenna Control */ 1067 /* Antenna Control */
1068 u32 ah_ant_ctl[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX]; 1068 u32 ah_ant_ctl[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
@@ -1200,6 +1200,7 @@ extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah);
1200 1200
1201/* Protocol Control Unit Functions */ 1201/* Protocol Control Unit Functions */
1202extern int ath5k_hw_set_opmode(struct ath5k_hw *ah); 1202extern int ath5k_hw_set_opmode(struct ath5k_hw *ah);
1203extern void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
1203/* BSSID Functions */ 1204/* BSSID Functions */
1204extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac); 1205extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
1205extern void ath5k_hw_set_associd(struct ath5k_hw *ah); 1206extern void ath5k_hw_set_associd(struct ath5k_hw *ah);
@@ -1231,6 +1232,10 @@ extern int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout);
1231extern unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah); 1232extern unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah);
1232extern int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout); 1233extern int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout);
1233extern unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah); 1234extern unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah);
1235/* Clock rate related functions */
1236unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec);
1237unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock);
1238unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah);
1234/* Key table (WEP) functions */ 1239/* Key table (WEP) functions */
1235extern int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry); 1240extern int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
1236extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry); 1241extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry);
@@ -1310,24 +1315,6 @@ extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
1310 * Functions used internaly 1315 * Functions used internaly
1311 */ 1316 */
1312 1317
1313/*
1314 * Translate usec to hw clock units
1315 * TODO: Half/quarter rate
1316 */
1317static inline unsigned int ath5k_hw_htoclock(unsigned int usec, bool turbo)
1318{
1319 return turbo ? (usec * 80) : (usec * 40);
1320}
1321
1322/*
1323 * Translate hw clock units to usec
1324 * TODO: Half/quarter rate
1325 */
1326static inline unsigned int ath5k_hw_clocktoh(unsigned int clock, bool turbo)
1327{
1328 return turbo ? (clock / 80) : (clock / 40);
1329}
1330
1331static inline struct ath_common *ath5k_hw_common(struct ath5k_hw *ah) 1318static inline struct ath_common *ath5k_hw_common(struct ath5k_hw *ah)
1332{ 1319{
1333 return &ah->common; 1320 return &ah->common;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index e63b7c40d0ee..8dce0077b023 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -83,7 +83,7 @@ MODULE_VERSION("0.6.0 (EXPERIMENTAL)");
83 83
84 84
85/* Known PCI ids */ 85/* Known PCI ids */
86static const struct pci_device_id ath5k_pci_id_table[] = { 86static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
87 { PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */ 87 { PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */
88 { PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */ 88 { PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */
89 { PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/ 89 { PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/
@@ -225,9 +225,9 @@ static int ath5k_reset_wake(struct ath5k_softc *sc);
225static int ath5k_start(struct ieee80211_hw *hw); 225static int ath5k_start(struct ieee80211_hw *hw);
226static void ath5k_stop(struct ieee80211_hw *hw); 226static void ath5k_stop(struct ieee80211_hw *hw);
227static int ath5k_add_interface(struct ieee80211_hw *hw, 227static int ath5k_add_interface(struct ieee80211_hw *hw,
228 struct ieee80211_if_init_conf *conf); 228 struct ieee80211_vif *vif);
229static void ath5k_remove_interface(struct ieee80211_hw *hw, 229static void ath5k_remove_interface(struct ieee80211_hw *hw,
230 struct ieee80211_if_init_conf *conf); 230 struct ieee80211_vif *vif);
231static int ath5k_config(struct ieee80211_hw *hw, u32 changed); 231static int ath5k_config(struct ieee80211_hw *hw, u32 changed);
232static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw, 232static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
233 int mc_count, struct dev_addr_list *mc_list); 233 int mc_count, struct dev_addr_list *mc_list);
@@ -241,8 +241,6 @@ static int ath5k_set_key(struct ieee80211_hw *hw,
241 struct ieee80211_key_conf *key); 241 struct ieee80211_key_conf *key);
242static int ath5k_get_stats(struct ieee80211_hw *hw, 242static int ath5k_get_stats(struct ieee80211_hw *hw,
243 struct ieee80211_low_level_stats *stats); 243 struct ieee80211_low_level_stats *stats);
244static int ath5k_get_tx_stats(struct ieee80211_hw *hw,
245 struct ieee80211_tx_queue_stats *stats);
246static u64 ath5k_get_tsf(struct ieee80211_hw *hw); 244static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
247static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf); 245static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf);
248static void ath5k_reset_tsf(struct ieee80211_hw *hw); 246static void ath5k_reset_tsf(struct ieee80211_hw *hw);
@@ -254,6 +252,8 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
254 u32 changes); 252 u32 changes);
255static void ath5k_sw_scan_start(struct ieee80211_hw *hw); 253static void ath5k_sw_scan_start(struct ieee80211_hw *hw);
256static void ath5k_sw_scan_complete(struct ieee80211_hw *hw); 254static void ath5k_sw_scan_complete(struct ieee80211_hw *hw);
255static void ath5k_set_coverage_class(struct ieee80211_hw *hw,
256 u8 coverage_class);
257 257
258static const struct ieee80211_ops ath5k_hw_ops = { 258static const struct ieee80211_ops ath5k_hw_ops = {
259 .tx = ath5k_tx, 259 .tx = ath5k_tx,
@@ -267,13 +267,13 @@ static const struct ieee80211_ops ath5k_hw_ops = {
267 .set_key = ath5k_set_key, 267 .set_key = ath5k_set_key,
268 .get_stats = ath5k_get_stats, 268 .get_stats = ath5k_get_stats,
269 .conf_tx = NULL, 269 .conf_tx = NULL,
270 .get_tx_stats = ath5k_get_tx_stats,
271 .get_tsf = ath5k_get_tsf, 270 .get_tsf = ath5k_get_tsf,
272 .set_tsf = ath5k_set_tsf, 271 .set_tsf = ath5k_set_tsf,
273 .reset_tsf = ath5k_reset_tsf, 272 .reset_tsf = ath5k_reset_tsf,
274 .bss_info_changed = ath5k_bss_info_changed, 273 .bss_info_changed = ath5k_bss_info_changed,
275 .sw_scan_start = ath5k_sw_scan_start, 274 .sw_scan_start = ath5k_sw_scan_start,
276 .sw_scan_complete = ath5k_sw_scan_complete, 275 .sw_scan_complete = ath5k_sw_scan_complete,
276 .set_coverage_class = ath5k_set_coverage_class,
277}; 277};
278 278
279/* 279/*
@@ -1246,6 +1246,29 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1246 return 0; 1246 return 0;
1247} 1247}
1248 1248
1249static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1250{
1251 struct ieee80211_hdr *hdr;
1252 enum ath5k_pkt_type htype;
1253 __le16 fc;
1254
1255 hdr = (struct ieee80211_hdr *)skb->data;
1256 fc = hdr->frame_control;
1257
1258 if (ieee80211_is_beacon(fc))
1259 htype = AR5K_PKT_TYPE_BEACON;
1260 else if (ieee80211_is_probe_resp(fc))
1261 htype = AR5K_PKT_TYPE_PROBE_RESP;
1262 else if (ieee80211_is_atim(fc))
1263 htype = AR5K_PKT_TYPE_ATIM;
1264 else if (ieee80211_is_pspoll(fc))
1265 htype = AR5K_PKT_TYPE_PSPOLL;
1266 else
1267 htype = AR5K_PKT_TYPE_NORMAL;
1268
1269 return htype;
1270}
1271
1249static int 1272static int
1250ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf, 1273ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
1251 struct ath5k_txq *txq) 1274 struct ath5k_txq *txq)
@@ -1300,7 +1323,8 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
1300 sc->vif, pktlen, info)); 1323 sc->vif, pktlen, info));
1301 } 1324 }
1302 ret = ah->ah_setup_tx_desc(ah, ds, pktlen, 1325 ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
1303 ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL, 1326 ieee80211_get_hdrlen_from_skb(skb),
1327 get_hw_packet_type(skb),
1304 (sc->power_level * 2), 1328 (sc->power_level * 2),
1305 hw_rate, 1329 hw_rate,
1306 info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags, 1330 info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
@@ -1329,7 +1353,6 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
1329 1353
1330 spin_lock_bh(&txq->lock); 1354 spin_lock_bh(&txq->lock);
1331 list_add_tail(&bf->list, &txq->q); 1355 list_add_tail(&bf->list, &txq->q);
1332 sc->tx_stats[txq->qnum].len++;
1333 if (txq->link == NULL) /* is this first packet? */ 1356 if (txq->link == NULL) /* is this first packet? */
1334 ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr); 1357 ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
1335 else /* no, so only link it */ 1358 else /* no, so only link it */
@@ -1513,7 +1536,8 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
1513 1536
1514 ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi); 1537 ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
1515 if (ret) 1538 if (ret)
1516 return ret; 1539 goto err;
1540
1517 if (sc->opmode == NL80211_IFTYPE_AP || 1541 if (sc->opmode == NL80211_IFTYPE_AP ||
1518 sc->opmode == NL80211_IFTYPE_MESH_POINT) { 1542 sc->opmode == NL80211_IFTYPE_MESH_POINT) {
1519 /* 1543 /*
@@ -1540,10 +1564,25 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
1540 if (ret) { 1564 if (ret) {
1541 ATH5K_ERR(sc, "%s: unable to update parameters for beacon " 1565 ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
1542 "hardware queue!\n", __func__); 1566 "hardware queue!\n", __func__);
1543 return ret; 1567 goto err;
1544 } 1568 }
1569 ret = ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */
1570 if (ret)
1571 goto err;
1572
1573 /* reconfigure cabq with ready time to 80% of beacon_interval */
1574 ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
1575 if (ret)
1576 goto err;
1577
1578 qi.tqi_ready_time = (sc->bintval * 80) / 100;
1579 ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
1580 if (ret)
1581 goto err;
1545 1582
1546 return ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */; 1583 ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
1584err:
1585 return ret;
1547} 1586}
1548 1587
1549static void 1588static void
@@ -1562,7 +1601,6 @@ ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1562 ath5k_txbuf_free(sc, bf); 1601 ath5k_txbuf_free(sc, bf);
1563 1602
1564 spin_lock_bh(&sc->txbuflock); 1603 spin_lock_bh(&sc->txbuflock);
1565 sc->tx_stats[txq->qnum].len--;
1566 list_move_tail(&bf->list, &sc->txbuf); 1604 list_move_tail(&bf->list, &sc->txbuf);
1567 sc->txbuf_len++; 1605 sc->txbuf_len++;
1568 spin_unlock_bh(&sc->txbuflock); 1606 spin_unlock_bh(&sc->txbuflock);
@@ -1992,10 +2030,8 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1992 } 2030 }
1993 2031
1994 ieee80211_tx_status(sc->hw, skb); 2032 ieee80211_tx_status(sc->hw, skb);
1995 sc->tx_stats[txq->qnum].count++;
1996 2033
1997 spin_lock(&sc->txbuflock); 2034 spin_lock(&sc->txbuflock);
1998 sc->tx_stats[txq->qnum].len--;
1999 list_move_tail(&bf->list, &sc->txbuf); 2035 list_move_tail(&bf->list, &sc->txbuf);
2000 sc->txbuf_len++; 2036 sc->txbuf_len++;
2001 spin_unlock(&sc->txbuflock); 2037 spin_unlock(&sc->txbuflock);
@@ -2773,7 +2809,7 @@ static void ath5k_stop(struct ieee80211_hw *hw)
2773} 2809}
2774 2810
2775static int ath5k_add_interface(struct ieee80211_hw *hw, 2811static int ath5k_add_interface(struct ieee80211_hw *hw,
2776 struct ieee80211_if_init_conf *conf) 2812 struct ieee80211_vif *vif)
2777{ 2813{
2778 struct ath5k_softc *sc = hw->priv; 2814 struct ath5k_softc *sc = hw->priv;
2779 int ret; 2815 int ret;
@@ -2784,22 +2820,22 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
2784 goto end; 2820 goto end;
2785 } 2821 }
2786 2822
2787 sc->vif = conf->vif; 2823 sc->vif = vif;
2788 2824
2789 switch (conf->type) { 2825 switch (vif->type) {
2790 case NL80211_IFTYPE_AP: 2826 case NL80211_IFTYPE_AP:
2791 case NL80211_IFTYPE_STATION: 2827 case NL80211_IFTYPE_STATION:
2792 case NL80211_IFTYPE_ADHOC: 2828 case NL80211_IFTYPE_ADHOC:
2793 case NL80211_IFTYPE_MESH_POINT: 2829 case NL80211_IFTYPE_MESH_POINT:
2794 case NL80211_IFTYPE_MONITOR: 2830 case NL80211_IFTYPE_MONITOR:
2795 sc->opmode = conf->type; 2831 sc->opmode = vif->type;
2796 break; 2832 break;
2797 default: 2833 default:
2798 ret = -EOPNOTSUPP; 2834 ret = -EOPNOTSUPP;
2799 goto end; 2835 goto end;
2800 } 2836 }
2801 2837
2802 ath5k_hw_set_lladdr(sc->ah, conf->mac_addr); 2838 ath5k_hw_set_lladdr(sc->ah, vif->addr);
2803 ath5k_mode_setup(sc); 2839 ath5k_mode_setup(sc);
2804 2840
2805 ret = 0; 2841 ret = 0;
@@ -2810,13 +2846,13 @@ end:
2810 2846
2811static void 2847static void
2812ath5k_remove_interface(struct ieee80211_hw *hw, 2848ath5k_remove_interface(struct ieee80211_hw *hw,
2813 struct ieee80211_if_init_conf *conf) 2849 struct ieee80211_vif *vif)
2814{ 2850{
2815 struct ath5k_softc *sc = hw->priv; 2851 struct ath5k_softc *sc = hw->priv;
2816 u8 mac[ETH_ALEN] = {}; 2852 u8 mac[ETH_ALEN] = {};
2817 2853
2818 mutex_lock(&sc->lock); 2854 mutex_lock(&sc->lock);
2819 if (sc->vif != conf->vif) 2855 if (sc->vif != vif)
2820 goto end; 2856 goto end;
2821 2857
2822 ath5k_hw_set_lladdr(sc->ah, mac); 2858 ath5k_hw_set_lladdr(sc->ah, mac);
@@ -3097,17 +3133,6 @@ ath5k_get_stats(struct ieee80211_hw *hw,
3097 return 0; 3133 return 0;
3098} 3134}
3099 3135
3100static int
3101ath5k_get_tx_stats(struct ieee80211_hw *hw,
3102 struct ieee80211_tx_queue_stats *stats)
3103{
3104 struct ath5k_softc *sc = hw->priv;
3105
3106 memcpy(stats, &sc->tx_stats, sizeof(sc->tx_stats));
3107
3108 return 0;
3109}
3110
3111static u64 3136static u64
3112ath5k_get_tsf(struct ieee80211_hw *hw) 3137ath5k_get_tsf(struct ieee80211_hw *hw)
3113{ 3138{
@@ -3262,3 +3287,22 @@ static void ath5k_sw_scan_complete(struct ieee80211_hw *hw)
3262 ath5k_hw_set_ledstate(sc->ah, sc->assoc ? 3287 ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
3263 AR5K_LED_ASSOC : AR5K_LED_INIT); 3288 AR5K_LED_ASSOC : AR5K_LED_INIT);
3264} 3289}
3290
3291/**
3292 * ath5k_set_coverage_class - Set IEEE 802.11 coverage class
3293 *
3294 * @hw: struct ieee80211_hw pointer
3295 * @coverage_class: IEEE 802.11 coverage class number
3296 *
3297 * Mac80211 callback. Sets slot time, ACK timeout and CTS timeout for given
3298 * coverage class. The values are persistent, they are restored after device
3299 * reset.
3300 */
3301static void ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
3302{
3303 struct ath5k_softc *sc = hw->priv;
3304
3305 mutex_lock(&sc->lock);
3306 ath5k_hw_set_coverage_class(sc->ah, coverage_class);
3307 mutex_unlock(&sc->lock);
3308}
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 952b3a21bbc3..7e1a88a5abdb 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -117,7 +117,6 @@ struct ath5k_softc {
117 struct pci_dev *pdev; /* for dma mapping */ 117 struct pci_dev *pdev; /* for dma mapping */
118 void __iomem *iobase; /* address of the device */ 118 void __iomem *iobase; /* address of the device */
119 struct mutex lock; /* dev-level lock */ 119 struct mutex lock; /* dev-level lock */
120 struct ieee80211_tx_queue_stats tx_stats[AR5K_NUM_TX_QUEUES];
121 struct ieee80211_low_level_stats ll_stats; 120 struct ieee80211_low_level_stats ll_stats;
122 struct ieee80211_hw *hw; /* IEEE 802.11 common */ 121 struct ieee80211_hw *hw; /* IEEE 802.11 common */
123 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; 122 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index 60f547503d75..67aa52e9bf94 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -77,6 +77,8 @@ static const struct pci_device_id ath5k_led_devices[] = {
77 { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) }, 77 { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) },
78 /* HP Compaq C700 (nitrousnrg@gmail.com) */ 78 /* HP Compaq C700 (nitrousnrg@gmail.com) */
79 { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) }, 79 { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) },
80 /* LiteOn AR5BXB63 (magooz@salug.it) */
81 { ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) },
80 /* IBM-specific AR5212 (all others) */ 82 /* IBM-specific AR5212 (all others) */
81 { PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5212_IBM), ATH_LED(0, 0) }, 83 { PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5212_IBM), ATH_LED(0, 0) },
82 /* Dell Vostro A860 (shahar@shahar-or.co.il) */ 84 /* Dell Vostro A860 (shahar@shahar-or.co.il) */
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 64fc1eb9b6d9..aefe84f9c04b 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -187,8 +187,8 @@ unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
187{ 187{
188 ATH5K_TRACE(ah->ah_sc); 188 ATH5K_TRACE(ah->ah_sc);
189 189
190 return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah, 190 return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
191 AR5K_TIME_OUT), AR5K_TIME_OUT_ACK), ah->ah_turbo); 191 AR5K_TIME_OUT), AR5K_TIME_OUT_ACK));
192} 192}
193 193
194/** 194/**
@@ -200,12 +200,12 @@ unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
200int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout) 200int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
201{ 201{
202 ATH5K_TRACE(ah->ah_sc); 202 ATH5K_TRACE(ah->ah_sc);
203 if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK), 203 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
204 ah->ah_turbo) <= timeout) 204 <= timeout)
205 return -EINVAL; 205 return -EINVAL;
206 206
207 AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK, 207 AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK,
208 ath5k_hw_htoclock(timeout, ah->ah_turbo)); 208 ath5k_hw_htoclock(ah, timeout));
209 209
210 return 0; 210 return 0;
211} 211}
@@ -218,8 +218,8 @@ int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
218unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah) 218unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
219{ 219{
220 ATH5K_TRACE(ah->ah_sc); 220 ATH5K_TRACE(ah->ah_sc);
221 return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah, 221 return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
222 AR5K_TIME_OUT), AR5K_TIME_OUT_CTS), ah->ah_turbo); 222 AR5K_TIME_OUT), AR5K_TIME_OUT_CTS));
223} 223}
224 224
225/** 225/**
@@ -231,17 +231,97 @@ unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
231int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout) 231int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
232{ 232{
233 ATH5K_TRACE(ah->ah_sc); 233 ATH5K_TRACE(ah->ah_sc);
234 if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS), 234 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
235 ah->ah_turbo) <= timeout) 235 <= timeout)
236 return -EINVAL; 236 return -EINVAL;
237 237
238 AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS, 238 AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS,
239 ath5k_hw_htoclock(timeout, ah->ah_turbo)); 239 ath5k_hw_htoclock(ah, timeout));
240 240
241 return 0; 241 return 0;
242} 242}
243 243
244/** 244/**
245 * ath5k_hw_htoclock - Translate usec to hw clock units
246 *
247 * @ah: The &struct ath5k_hw
248 * @usec: value in microseconds
249 */
250unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
251{
252 return usec * ath5k_hw_get_clockrate(ah);
253}
254
255/**
256 * ath5k_hw_clocktoh - Translate hw clock units to usec
257 * @clock: value in hw clock units
258 */
259unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
260{
261 return clock / ath5k_hw_get_clockrate(ah);
262}
263
264/**
265 * ath5k_hw_get_clockrate - Get the clock rate for current mode
266 *
267 * @ah: The &struct ath5k_hw
268 */
269unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah)
270{
271 struct ieee80211_channel *channel = ah->ah_current_channel;
272 int clock;
273
274 if (channel->hw_value & CHANNEL_5GHZ)
275 clock = 40; /* 802.11a */
276 else if (channel->hw_value & CHANNEL_CCK)
277 clock = 22; /* 802.11b */
278 else
279 clock = 44; /* 802.11g */
280
281 /* Clock rate in turbo modes is twice the normal rate */
282 if (channel->hw_value & CHANNEL_TURBO)
283 clock *= 2;
284
285 return clock;
286}
287
288/**
289 * ath5k_hw_get_default_slottime - Get the default slot time for current mode
290 *
291 * @ah: The &struct ath5k_hw
292 */
293unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
294{
295 struct ieee80211_channel *channel = ah->ah_current_channel;
296
297 if (channel->hw_value & CHANNEL_TURBO)
298 return 6; /* both turbo modes */
299
300 if (channel->hw_value & CHANNEL_CCK)
301 return 20; /* 802.11b */
302
303 return 9; /* 802.11 a/g */
304}
305
306/**
307 * ath5k_hw_get_default_sifs - Get the default SIFS for current mode
308 *
309 * @ah: The &struct ath5k_hw
310 */
311unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
312{
313 struct ieee80211_channel *channel = ah->ah_current_channel;
314
315 if (channel->hw_value & CHANNEL_TURBO)
316 return 8; /* both turbo modes */
317
318 if (channel->hw_value & CHANNEL_5GHZ)
319 return 16; /* 802.11a */
320
321 return 10; /* 802.11 b/g */
322}
323
324/**
245 * ath5k_hw_set_lladdr - Set station id 325 * ath5k_hw_set_lladdr - Set station id
246 * 326 *
247 * @ah: The &struct ath5k_hw 327 * @ah: The &struct ath5k_hw
@@ -1050,3 +1130,24 @@ int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
1050 return 0; 1130 return 0;
1051} 1131}
1052 1132
1133/**
1134 * ath5k_hw_set_coverage_class - Set IEEE 802.11 coverage class
1135 *
1136 * @ah: The &struct ath5k_hw
1137 * @coverage_class: IEEE 802.11 coverage class number
1138 *
1139 * Sets slot time, ACK timeout and CTS timeout for given coverage class.
1140 */
1141void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
1142{
1143 /* As defined by IEEE 802.11-2007 17.3.8.6 */
1144 int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class;
1145 int ack_timeout = ath5k_hw_get_default_sifs(ah) + slot_time;
1146 int cts_timeout = ack_timeout;
1147
1148 ath5k_hw_set_slot_time(ah, slot_time);
1149 ath5k_hw_set_ack_timeout(ah, ack_timeout);
1150 ath5k_hw_set_cts_timeout(ah, cts_timeout);
1151
1152 ah->ah_coverage_class = coverage_class;
1153}
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index eeebb9aef206..9122a8556f45 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -408,12 +408,13 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
408 break; 408 break;
409 409
410 case AR5K_TX_QUEUE_CAB: 410 case AR5K_TX_QUEUE_CAB:
411 /* XXX: use BCN_SENT_GT, if we can figure out how */
411 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), 412 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
412 AR5K_QCU_MISC_FRSHED_BCN_SENT_GT | 413 AR5K_QCU_MISC_FRSHED_DBA_GT |
413 AR5K_QCU_MISC_CBREXP_DIS | 414 AR5K_QCU_MISC_CBREXP_DIS |
414 AR5K_QCU_MISC_CBREXP_BCN_DIS); 415 AR5K_QCU_MISC_CBREXP_BCN_DIS);
415 416
416 ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL - 417 ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
417 (AR5K_TUNE_SW_BEACON_RESP - 418 (AR5K_TUNE_SW_BEACON_RESP -
418 AR5K_TUNE_DMA_BEACON_RESP) - 419 AR5K_TUNE_DMA_BEACON_RESP) -
419 AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) | 420 AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
@@ -520,12 +521,16 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
520 */ 521 */
521unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah) 522unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
522{ 523{
524 unsigned int slot_time_clock;
525
523 ATH5K_TRACE(ah->ah_sc); 526 ATH5K_TRACE(ah->ah_sc);
527
524 if (ah->ah_version == AR5K_AR5210) 528 if (ah->ah_version == AR5K_AR5210)
525 return ath5k_hw_clocktoh(ath5k_hw_reg_read(ah, 529 slot_time_clock = ath5k_hw_reg_read(ah, AR5K_SLOT_TIME);
526 AR5K_SLOT_TIME) & 0xffff, ah->ah_turbo);
527 else 530 else
528 return ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT) & 0xffff; 531 slot_time_clock = ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT);
532
533 return ath5k_hw_clocktoh(ah, slot_time_clock & 0xffff);
529} 534}
530 535
531/* 536/*
@@ -533,15 +538,17 @@ unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
533 */ 538 */
534int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time) 539int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
535{ 540{
541 u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
542
536 ATH5K_TRACE(ah->ah_sc); 543 ATH5K_TRACE(ah->ah_sc);
537 if (slot_time < AR5K_SLOT_TIME_9 || slot_time > AR5K_SLOT_TIME_MAX) 544
545 if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
538 return -EINVAL; 546 return -EINVAL;
539 547
540 if (ah->ah_version == AR5K_AR5210) 548 if (ah->ah_version == AR5K_AR5210)
541 ath5k_hw_reg_write(ah, ath5k_hw_htoclock(slot_time, 549 ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);
542 ah->ah_turbo), AR5K_SLOT_TIME);
543 else 550 else
544 ath5k_hw_reg_write(ah, slot_time, AR5K_DCU_GBL_IFS_SLOT); 551 ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);
545 552
546 return 0; 553 return 0;
547} 554}
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 62954fc77869..a35a7db0fc4c 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -60,12 +60,11 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
60 !(channel->hw_value & CHANNEL_OFDM)); 60 !(channel->hw_value & CHANNEL_OFDM));
61 61
62 /* Get coefficient 62 /* Get coefficient
63 * ALGO: coef = (5 * clock * carrier_freq) / 2) 63 * ALGO: coef = (5 * clock / carrier_freq) / 2
64 * we scale coef by shifting clock value by 24 for 64 * we scale coef by shifting clock value by 24 for
65 * better precision since we use integers */ 65 * better precision since we use integers */
66 /* TODO: Half/quarter rate */ 66 /* TODO: Half/quarter rate */
67 clock = ath5k_hw_htoclock(1, channel->hw_value & CHANNEL_TURBO); 67 clock = (channel->hw_value & CHANNEL_TURBO) ? 80 : 40;
68
69 coef_scaled = ((5 * (clock << 24)) / 2) / channel->center_freq; 68 coef_scaled = ((5 * (clock << 24)) / 2) / channel->center_freq;
70 69
71 /* Get exponent 70 /* Get exponent
@@ -1317,6 +1316,10 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1317 /* Restore antenna mode */ 1316 /* Restore antenna mode */
1318 ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode); 1317 ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
1319 1318
1319 /* Restore slot time and ACK timeouts */
1320 if (ah->ah_coverage_class > 0)
1321 ath5k_hw_set_coverage_class(ah, ah->ah_coverage_class);
1322
1320 /* 1323 /*
1321 * Configure QCUs/DCUs 1324 * Configure QCUs/DCUs
1322 */ 1325 */
@@ -1371,8 +1374,9 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1371 * Set clocks to 32KHz operation and use an 1374 * Set clocks to 32KHz operation and use an
1372 * external 32KHz crystal when sleeping if one 1375 * external 32KHz crystal when sleeping if one
1373 * exists */ 1376 * exists */
1374 if (ah->ah_version == AR5K_AR5212) 1377 if (ah->ah_version == AR5K_AR5212 &&
1375 ath5k_hw_set_sleep_clock(ah, true); 1378 ah->ah_op_mode != NL80211_IFTYPE_AP)
1379 ath5k_hw_set_sleep_clock(ah, true);
1376 1380
1377 /* 1381 /*
1378 * Disable beacons and reset the register 1382 * Disable beacons and reset the register
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 4985b2b1b0a9..6b50d5eb9ec3 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -1,4 +1,6 @@
1ath9k-y += beacon.o \ 1ath9k-y += beacon.o \
2 gpio.o \
3 init.o \
2 main.o \ 4 main.o \
3 recv.o \ 5 recv.o \
4 xmit.o \ 6 xmit.o \
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 329e6bc137ab..ca4994f13151 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -27,12 +27,6 @@ static void ath_ahb_read_cachesize(struct ath_common *common, int *csz)
27 *csz = L1_CACHE_BYTES >> 2; 27 *csz = L1_CACHE_BYTES >> 2;
28} 28}
29 29
30static void ath_ahb_cleanup(struct ath_common *common)
31{
32 struct ath_softc *sc = (struct ath_softc *)common->priv;
33 iounmap(sc->mem);
34}
35
36static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data) 30static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
37{ 31{
38 struct ath_softc *sc = (struct ath_softc *)common->priv; 32 struct ath_softc *sc = (struct ath_softc *)common->priv;
@@ -54,8 +48,6 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
54 48
55static struct ath_bus_ops ath_ahb_bus_ops = { 49static struct ath_bus_ops ath_ahb_bus_ops = {
56 .read_cachesize = ath_ahb_read_cachesize, 50 .read_cachesize = ath_ahb_read_cachesize,
57 .cleanup = ath_ahb_cleanup,
58
59 .eeprom_read = ath_ahb_eeprom_read, 51 .eeprom_read = ath_ahb_eeprom_read,
60}; 52};
61 53
@@ -121,16 +113,19 @@ static int ath_ahb_probe(struct platform_device *pdev)
121 sc->mem = mem; 113 sc->mem = mem;
122 sc->irq = irq; 114 sc->irq = irq;
123 115
124 ret = ath_init_device(AR5416_AR9100_DEVID, sc, 0x0, &ath_ahb_bus_ops); 116 /* Will be cleared in ath9k_start() */
117 sc->sc_flags |= SC_OP_INVALID;
118
119 ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
125 if (ret) { 120 if (ret) {
126 dev_err(&pdev->dev, "failed to initialize device\n"); 121 dev_err(&pdev->dev, "request_irq failed\n");
127 goto err_free_hw; 122 goto err_free_hw;
128 } 123 }
129 124
130 ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc); 125 ret = ath9k_init_device(AR5416_AR9100_DEVID, sc, 0x0, &ath_ahb_bus_ops);
131 if (ret) { 126 if (ret) {
132 dev_err(&pdev->dev, "request_irq failed\n"); 127 dev_err(&pdev->dev, "failed to initialize device\n");
133 goto err_detach; 128 goto err_irq;
134 } 129 }
135 130
136 ah = sc->sc_ah; 131 ah = sc->sc_ah;
@@ -143,8 +138,8 @@ static int ath_ahb_probe(struct platform_device *pdev)
143 138
144 return 0; 139 return 0;
145 140
146 err_detach: 141 err_irq:
147 ath_detach(sc); 142 free_irq(irq, sc);
148 err_free_hw: 143 err_free_hw:
149 ieee80211_free_hw(hw); 144 ieee80211_free_hw(hw);
150 platform_set_drvdata(pdev, NULL); 145 platform_set_drvdata(pdev, NULL);
@@ -161,8 +156,12 @@ static int ath_ahb_remove(struct platform_device *pdev)
161 if (hw) { 156 if (hw) {
162 struct ath_wiphy *aphy = hw->priv; 157 struct ath_wiphy *aphy = hw->priv;
163 struct ath_softc *sc = aphy->sc; 158 struct ath_softc *sc = aphy->sc;
159 void __iomem *mem = sc->mem;
164 160
165 ath_cleanup(sc); 161 ath9k_deinit_device(sc);
162 free_irq(sc->irq, sc);
163 ieee80211_free_hw(sc->hw);
164 iounmap(mem);
166 platform_set_drvdata(pdev, NULL); 165 platform_set_drvdata(pdev, NULL);
167 } 166 }
168 167
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 1597a42731ed..83c7ea4c007f 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -267,6 +267,7 @@ void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
267 u16 tid, u16 *ssn); 267 u16 tid, u16 *ssn);
268void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid); 268void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
269void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid); 269void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
270void ath9k_enable_ps(struct ath_softc *sc);
270 271
271/********/ 272/********/
272/* VIFs */ 273/* VIFs */
@@ -341,6 +342,12 @@ int ath_beaconq_config(struct ath_softc *sc);
341#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */ 342#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
342#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */ 343#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
343 344
345void ath_ani_calibrate(unsigned long data);
346
347/**********/
348/* BTCOEX */
349/**********/
350
344/* Defines the BT AR_BT_COEX_WGHT used */ 351/* Defines the BT AR_BT_COEX_WGHT used */
345enum ath_stomp_type { 352enum ath_stomp_type {
346 ATH_BTCOEX_NO_STOMP, 353 ATH_BTCOEX_NO_STOMP,
@@ -358,9 +365,14 @@ struct ath_btcoex {
358 int bt_stomp_type; /* Types of BT stomping */ 365 int bt_stomp_type; /* Types of BT stomping */
359 u32 btcoex_no_stomp; /* in usec */ 366 u32 btcoex_no_stomp; /* in usec */
360 u32 btcoex_period; /* in usec */ 367 u32 btcoex_period; /* in usec */
368 u32 btscan_no_stomp; /* in usec */
361 struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */ 369 struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
362}; 370};
363 371
372int ath_init_btcoex_timer(struct ath_softc *sc);
373void ath9k_btcoex_timer_resume(struct ath_softc *sc);
374void ath9k_btcoex_timer_pause(struct ath_softc *sc);
375
364/********************/ 376/********************/
365/* LED Control */ 377/* LED Control */
366/********************/ 378/********************/
@@ -385,6 +397,9 @@ struct ath_led {
385 bool registered; 397 bool registered;
386}; 398};
387 399
400void ath_init_leds(struct ath_softc *sc);
401void ath_deinit_leds(struct ath_softc *sc);
402
388/********************/ 403/********************/
389/* Main driver core */ 404/* Main driver core */
390/********************/ 405/********************/
@@ -403,26 +418,29 @@ struct ath_led {
403#define ATH_TXPOWER_MAX 100 /* .5 dBm units */ 418#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
404#define ATH_RATE_DUMMY_MARKER 0 419#define ATH_RATE_DUMMY_MARKER 0
405 420
406#define SC_OP_INVALID BIT(0) 421#define SC_OP_INVALID BIT(0)
407#define SC_OP_BEACONS BIT(1) 422#define SC_OP_BEACONS BIT(1)
408#define SC_OP_RXAGGR BIT(2) 423#define SC_OP_RXAGGR BIT(2)
409#define SC_OP_TXAGGR BIT(3) 424#define SC_OP_TXAGGR BIT(3)
410#define SC_OP_FULL_RESET BIT(4) 425#define SC_OP_FULL_RESET BIT(4)
411#define SC_OP_PREAMBLE_SHORT BIT(5) 426#define SC_OP_PREAMBLE_SHORT BIT(5)
412#define SC_OP_PROTECT_ENABLE BIT(6) 427#define SC_OP_PROTECT_ENABLE BIT(6)
413#define SC_OP_RXFLUSH BIT(7) 428#define SC_OP_RXFLUSH BIT(7)
414#define SC_OP_LED_ASSOCIATED BIT(8) 429#define SC_OP_LED_ASSOCIATED BIT(8)
415#define SC_OP_WAIT_FOR_BEACON BIT(12) 430#define SC_OP_LED_ON BIT(9)
416#define SC_OP_LED_ON BIT(13) 431#define SC_OP_SCANNING BIT(10)
417#define SC_OP_SCANNING BIT(14) 432#define SC_OP_TSF_RESET BIT(11)
418#define SC_OP_TSF_RESET BIT(15) 433#define SC_OP_BT_PRIORITY_DETECTED BIT(12)
419#define SC_OP_WAIT_FOR_CAB BIT(16) 434#define SC_OP_BT_SCAN BIT(13)
420#define SC_OP_WAIT_FOR_PSPOLL_DATA BIT(17) 435
421#define SC_OP_WAIT_FOR_TX_ACK BIT(18) 436/* Powersave flags */
422#define SC_OP_BEACON_SYNC BIT(19) 437#define PS_WAIT_FOR_BEACON BIT(0)
423#define SC_OP_BT_PRIORITY_DETECTED BIT(21) 438#define PS_WAIT_FOR_CAB BIT(1)
424#define SC_OP_NULLFUNC_COMPLETED BIT(22) 439#define PS_WAIT_FOR_PSPOLL_DATA BIT(2)
425#define SC_OP_PS_ENABLED BIT(23) 440#define PS_WAIT_FOR_TX_ACK BIT(3)
441#define PS_BEACON_SYNC BIT(4)
442#define PS_NULLFUNC_COMPLETED BIT(5)
443#define PS_ENABLED BIT(6)
426 444
427struct ath_wiphy; 445struct ath_wiphy;
428struct ath_rate_table; 446struct ath_rate_table;
@@ -453,16 +471,17 @@ struct ath_softc {
453 int irq; 471 int irq;
454 spinlock_t sc_resetlock; 472 spinlock_t sc_resetlock;
455 spinlock_t sc_serial_rw; 473 spinlock_t sc_serial_rw;
456 spinlock_t ani_lock;
457 spinlock_t sc_pm_lock; 474 spinlock_t sc_pm_lock;
458 struct mutex mutex; 475 struct mutex mutex;
459 476
460 u32 intrstatus; 477 u32 intrstatus;
461 u32 sc_flags; /* SC_OP_* */ 478 u32 sc_flags; /* SC_OP_* */
479 u16 ps_flags; /* PS_* */
462 u16 curtxpow; 480 u16 curtxpow;
463 u8 nbcnvifs; 481 u8 nbcnvifs;
464 u16 nvifs; 482 u16 nvifs;
465 bool ps_enabled; 483 bool ps_enabled;
484 bool ps_idle;
466 unsigned long ps_usecount; 485 unsigned long ps_usecount;
467 enum ath9k_int imask; 486 enum ath9k_int imask;
468 487
@@ -509,6 +528,7 @@ struct ath_wiphy {
509 int chan_is_ht; 528 int chan_is_ht;
510}; 529};
511 530
531void ath9k_tasklet(unsigned long data);
512int ath_reset(struct ath_softc *sc, bool retry_tx); 532int ath_reset(struct ath_softc *sc, bool retry_tx);
513int ath_get_hal_qnum(u16 queue, struct ath_softc *sc); 533int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
514int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc); 534int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
@@ -519,21 +539,16 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
519 common->bus_ops->read_cachesize(common, csz); 539 common->bus_ops->read_cachesize(common, csz);
520} 540}
521 541
522static inline void ath_bus_cleanup(struct ath_common *common)
523{
524 common->bus_ops->cleanup(common);
525}
526
527extern struct ieee80211_ops ath9k_ops; 542extern struct ieee80211_ops ath9k_ops;
543extern int modparam_nohwcrypt;
528 544
529irqreturn_t ath_isr(int irq, void *dev); 545irqreturn_t ath_isr(int irq, void *dev);
530void ath_cleanup(struct ath_softc *sc); 546int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
531int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
532 const struct ath_bus_ops *bus_ops); 547 const struct ath_bus_ops *bus_ops);
533void ath_detach(struct ath_softc *sc); 548void ath9k_deinit_device(struct ath_softc *sc);
534const char *ath_mac_bb_name(u32 mac_bb_version); 549const char *ath_mac_bb_name(u32 mac_bb_version);
535const char *ath_rf_name(u16 rf_version); 550const char *ath_rf_name(u16 rf_version);
536void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw); 551void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
537void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw, 552void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
538 struct ath9k_channel *ichan); 553 struct ath9k_channel *ichan);
539void ath_update_chainmask(struct ath_softc *sc, int is_ht); 554void ath_update_chainmask(struct ath_softc *sc, int is_ht);
@@ -542,6 +557,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
542 557
543void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw); 558void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw);
544void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw); 559void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
560bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode);
545 561
546#ifdef CONFIG_PCI 562#ifdef CONFIG_PCI
547int ath_pci_init(void); 563int ath_pci_init(void);
@@ -583,4 +599,8 @@ void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
583void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue); 599void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
584 600
585int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype); 601int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
602
603void ath_start_rfkill_poll(struct ath_softc *sc);
604extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
605
586#endif /* ATH9K_H */ 606#endif /* ATH9K_H */
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 1660ef17aaf5..b4a31a43a62c 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -62,7 +62,7 @@ int ath_beaconq_config(struct ath_softc *sc)
62 * Beacons are always sent out at the lowest rate, and are not retried. 62 * Beacons are always sent out at the lowest rate, and are not retried.
63*/ 63*/
64static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp, 64static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
65 struct ath_buf *bf) 65 struct ath_buf *bf, int rateidx)
66{ 66{
67 struct sk_buff *skb = bf->bf_mpdu; 67 struct sk_buff *skb = bf->bf_mpdu;
68 struct ath_hw *ah = sc->sc_ah; 68 struct ath_hw *ah = sc->sc_ah;
@@ -96,9 +96,9 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
96 ds->ds_data = bf->bf_buf_addr; 96 ds->ds_data = bf->bf_buf_addr;
97 97
98 sband = &sc->sbands[common->hw->conf.channel->band]; 98 sband = &sc->sbands[common->hw->conf.channel->band];
99 rate = sband->bitrates[0].hw_value; 99 rate = sband->bitrates[rateidx].hw_value;
100 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT) 100 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
101 rate |= sband->bitrates[0].hw_value_short; 101 rate |= sband->bitrates[rateidx].hw_value_short;
102 102
103 ath9k_hw_set11n_txdesc(ah, ds, skb->len + FCS_LEN, 103 ath9k_hw_set11n_txdesc(ah, ds, skb->len + FCS_LEN,
104 ATH9K_PKT_TYPE_BEACON, 104 ATH9K_PKT_TYPE_BEACON,
@@ -206,7 +206,7 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
206 } 206 }
207 } 207 }
208 208
209 ath_beacon_setup(sc, avp, bf); 209 ath_beacon_setup(sc, avp, bf, info->control.rates[0].idx);
210 210
211 while (skb) { 211 while (skb) {
212 ath_tx_cabq(hw, skb); 212 ath_tx_cabq(hw, skb);
@@ -237,7 +237,7 @@ static void ath_beacon_start_adhoc(struct ath_softc *sc,
237 bf = avp->av_bcbuf; 237 bf = avp->av_bcbuf;
238 skb = bf->bf_mpdu; 238 skb = bf->bf_mpdu;
239 239
240 ath_beacon_setup(sc, avp, bf); 240 ath_beacon_setup(sc, avp, bf, 0);
241 241
242 /* NB: caller is known to have already stopped tx dma */ 242 /* NB: caller is known to have already stopped tx dma */
243 ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr); 243 ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr);
@@ -480,7 +480,8 @@ void ath_beacon_tasklet(unsigned long data)
480 sc->beacon.updateslot = COMMIT; /* commit next beacon */ 480 sc->beacon.updateslot = COMMIT; /* commit next beacon */
481 sc->beacon.slotupdate = slot; 481 sc->beacon.slotupdate = slot;
482 } else if (sc->beacon.updateslot == COMMIT && sc->beacon.slotupdate == slot) { 482 } else if (sc->beacon.updateslot == COMMIT && sc->beacon.slotupdate == slot) {
483 ath9k_hw_setslottime(sc->sc_ah, sc->beacon.slottime); 483 ah->slottime = sc->beacon.slottime;
484 ath9k_hw_init_global_settings(ah);
484 sc->beacon.updateslot = OK; 485 sc->beacon.updateslot = OK;
485 } 486 }
486 if (bfaddr != 0) { 487 if (bfaddr != 0) {
@@ -525,16 +526,13 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
525{ 526{
526 u32 nexttbtt, intval; 527 u32 nexttbtt, intval;
527 528
528 /* Configure the timers only when the TSF has to be reset */
529
530 if (!(sc->sc_flags & SC_OP_TSF_RESET))
531 return;
532
533 /* NB: the beacon interval is kept internally in TU's */ 529 /* NB: the beacon interval is kept internally in TU's */
534 intval = conf->beacon_interval & ATH9K_BEACON_PERIOD; 530 intval = conf->beacon_interval & ATH9K_BEACON_PERIOD;
535 intval /= ATH_BCBUF; /* for staggered beacons */ 531 intval /= ATH_BCBUF; /* for staggered beacons */
536 nexttbtt = intval; 532 nexttbtt = intval;
537 intval |= ATH9K_BEACON_RESET_TSF; 533
534 if (sc->sc_flags & SC_OP_TSF_RESET)
535 intval |= ATH9K_BEACON_RESET_TSF;
538 536
539 /* 537 /*
540 * In AP mode we enable the beacon timers and SWBA interrupts to 538 * In AP mode we enable the beacon timers and SWBA interrupts to
@@ -576,6 +574,13 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
576 u64 tsf; 574 u64 tsf;
577 int num_beacons, offset, dtim_dec_count, cfp_dec_count; 575 int num_beacons, offset, dtim_dec_count, cfp_dec_count;
578 576
577 /* No need to configure beacon if we are not associated */
578 if (!common->curaid) {
579 ath_print(common, ATH_DBG_BEACON,
580 "STA is not yet associated..skipping beacon config\n");
581 return;
582 }
583
579 memset(&bs, 0, sizeof(bs)); 584 memset(&bs, 0, sizeof(bs));
580 intval = conf->beacon_interval & ATH9K_BEACON_PERIOD; 585 intval = conf->beacon_interval & ATH9K_BEACON_PERIOD;
581 586
@@ -738,7 +743,6 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
738 enum nl80211_iftype iftype; 743 enum nl80211_iftype iftype;
739 744
740 /* Setup the beacon configuration parameters */ 745 /* Setup the beacon configuration parameters */
741
742 if (vif) { 746 if (vif) {
743 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 747 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
744 748
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 1ba31a73317c..1ee5a15ccbb1 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -25,10 +25,12 @@
25 25
26#define ATH_BTCOEX_DEF_BT_PERIOD 45 26#define ATH_BTCOEX_DEF_BT_PERIOD 45
27#define ATH_BTCOEX_DEF_DUTY_CYCLE 55 27#define ATH_BTCOEX_DEF_DUTY_CYCLE 55
28#define ATH_BTCOEX_BTSCAN_DUTY_CYCLE 90
28#define ATH_BTCOEX_BMISS_THRESH 50 29#define ATH_BTCOEX_BMISS_THRESH 50
29 30
30#define ATH_BT_PRIORITY_TIME_THRESHOLD 1000 /* ms */ 31#define ATH_BT_PRIORITY_TIME_THRESHOLD 1000 /* ms */
31#define ATH_BT_CNT_THRESHOLD 3 32#define ATH_BT_CNT_THRESHOLD 3
33#define ATH_BT_CNT_SCAN_THRESHOLD 15
32 34
33enum ath_btcoex_scheme { 35enum ath_btcoex_scheme {
34 ATH_BTCOEX_CFG_NONE, 36 ATH_BTCOEX_CFG_NONE,
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index b66f72dbf7b9..42d2a506845a 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -75,17 +75,24 @@ static const struct file_operations fops_debug = {
75 75
76#endif 76#endif
77 77
78#define DMA_BUF_LEN 1024
79
78static ssize_t read_file_dma(struct file *file, char __user *user_buf, 80static ssize_t read_file_dma(struct file *file, char __user *user_buf,
79 size_t count, loff_t *ppos) 81 size_t count, loff_t *ppos)
80{ 82{
81 struct ath_softc *sc = file->private_data; 83 struct ath_softc *sc = file->private_data;
82 struct ath_hw *ah = sc->sc_ah; 84 struct ath_hw *ah = sc->sc_ah;
83 char buf[1024]; 85 char *buf;
86 int retval;
84 unsigned int len = 0; 87 unsigned int len = 0;
85 u32 val[ATH9K_NUM_DMA_DEBUG_REGS]; 88 u32 val[ATH9K_NUM_DMA_DEBUG_REGS];
86 int i, qcuOffset = 0, dcuOffset = 0; 89 int i, qcuOffset = 0, dcuOffset = 0;
87 u32 *qcuBase = &val[0], *dcuBase = &val[4]; 90 u32 *qcuBase = &val[0], *dcuBase = &val[4];
88 91
92 buf = kmalloc(DMA_BUF_LEN, GFP_KERNEL);
93 if (!buf)
94 return 0;
95
89 ath9k_ps_wakeup(sc); 96 ath9k_ps_wakeup(sc);
90 97
91 REG_WRITE_D(ah, AR_MACMISC, 98 REG_WRITE_D(ah, AR_MACMISC,
@@ -93,20 +100,20 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
93 (AR_MACMISC_MISC_OBS_BUS_1 << 100 (AR_MACMISC_MISC_OBS_BUS_1 <<
94 AR_MACMISC_MISC_OBS_BUS_MSB_S))); 101 AR_MACMISC_MISC_OBS_BUS_MSB_S)));
95 102
96 len += snprintf(buf + len, sizeof(buf) - len, 103 len += snprintf(buf + len, DMA_BUF_LEN - len,
97 "Raw DMA Debug values:\n"); 104 "Raw DMA Debug values:\n");
98 105
99 for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) { 106 for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
100 if (i % 4 == 0) 107 if (i % 4 == 0)
101 len += snprintf(buf + len, sizeof(buf) - len, "\n"); 108 len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
102 109
103 val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32))); 110 val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32)));
104 len += snprintf(buf + len, sizeof(buf) - len, "%d: %08x ", 111 len += snprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
105 i, val[i]); 112 i, val[i]);
106 } 113 }
107 114
108 len += snprintf(buf + len, sizeof(buf) - len, "\n\n"); 115 len += snprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
109 len += snprintf(buf + len, sizeof(buf) - len, 116 len += snprintf(buf + len, DMA_BUF_LEN - len,
110 "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n"); 117 "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
111 118
112 for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) { 119 for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) {
@@ -120,7 +127,7 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
120 dcuBase++; 127 dcuBase++;
121 } 128 }
122 129
123 len += snprintf(buf + len, sizeof(buf) - len, 130 len += snprintf(buf + len, DMA_BUF_LEN - len,
124 "%2d %2x %1x %2x %2x\n", 131 "%2d %2x %1x %2x %2x\n",
125 i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset, 132 i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
126 (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3), 133 (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
@@ -128,35 +135,37 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
128 (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset); 135 (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
129 } 136 }
130 137
131 len += snprintf(buf + len, sizeof(buf) - len, "\n"); 138 len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
132 139
133 len += snprintf(buf + len, sizeof(buf) - len, 140 len += snprintf(buf + len, DMA_BUF_LEN - len,
134 "qcu_stitch state: %2x qcu_fetch state: %2x\n", 141 "qcu_stitch state: %2x qcu_fetch state: %2x\n",
135 (val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22); 142 (val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
136 len += snprintf(buf + len, sizeof(buf) - len, 143 len += snprintf(buf + len, DMA_BUF_LEN - len,
137 "qcu_complete state: %2x dcu_complete state: %2x\n", 144 "qcu_complete state: %2x dcu_complete state: %2x\n",
138 (val[3] & 0x1c000000) >> 26, (val[6] & 0x3)); 145 (val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
139 len += snprintf(buf + len, sizeof(buf) - len, 146 len += snprintf(buf + len, DMA_BUF_LEN - len,
140 "dcu_arb state: %2x dcu_fp state: %2x\n", 147 "dcu_arb state: %2x dcu_fp state: %2x\n",
141 (val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27); 148 (val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
142 len += snprintf(buf + len, sizeof(buf) - len, 149 len += snprintf(buf + len, DMA_BUF_LEN - len,
143 "chan_idle_dur: %3d chan_idle_dur_valid: %1d\n", 150 "chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
144 (val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10); 151 (val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
145 len += snprintf(buf + len, sizeof(buf) - len, 152 len += snprintf(buf + len, DMA_BUF_LEN - len,
146 "txfifo_valid_0: %1d txfifo_valid_1: %1d\n", 153 "txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
147 (val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12); 154 (val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
148 len += snprintf(buf + len, sizeof(buf) - len, 155 len += snprintf(buf + len, DMA_BUF_LEN - len,
149 "txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n", 156 "txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
150 (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17); 157 (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
151 158
152 len += snprintf(buf + len, sizeof(buf) - len, "pcu observe: 0x%x \n", 159 len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x \n",
153 REG_READ_D(ah, AR_OBS_BUS_1)); 160 REG_READ_D(ah, AR_OBS_BUS_1));
154 len += snprintf(buf + len, sizeof(buf) - len, 161 len += snprintf(buf + len, DMA_BUF_LEN - len,
155 "AR_CR: 0x%x \n", REG_READ_D(ah, AR_CR)); 162 "AR_CR: 0x%x \n", REG_READ_D(ah, AR_CR));
156 163
157 ath9k_ps_restore(sc); 164 ath9k_ps_restore(sc);
158 165
159 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 166 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
167 kfree(buf);
168 return retval;
160} 169}
161 170
162static const struct file_operations fops_dma = { 171static const struct file_operations fops_dma = {
@@ -289,23 +298,49 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
289 if (sc->cur_rate_table == NULL) 298 if (sc->cur_rate_table == NULL)
290 return 0; 299 return 0;
291 300
292 max = 80 + sc->cur_rate_table->rate_cnt * 64; 301 max = 80 + sc->cur_rate_table->rate_cnt * 1024;
293 buf = kmalloc(max + 1, GFP_KERNEL); 302 buf = kmalloc(max + 1, GFP_KERNEL);
294 if (buf == NULL) 303 if (buf == NULL)
295 return 0; 304 return 0;
296 buf[max] = 0; 305 buf[max] = 0;
297 306
298 len += sprintf(buf, "%5s %15s %8s %9s %3s\n\n", "Rate", "Success", 307 len += sprintf(buf, "%6s %6s %6s "
299 "Retries", "XRetries", "PER"); 308 "%10s %10s %10s %10s\n",
309 "HT", "MCS", "Rate",
310 "Success", "Retries", "XRetries", "PER");
300 311
301 for (i = 0; i < sc->cur_rate_table->rate_cnt; i++) { 312 for (i = 0; i < sc->cur_rate_table->rate_cnt; i++) {
302 u32 ratekbps = sc->cur_rate_table->info[i].ratekbps; 313 u32 ratekbps = sc->cur_rate_table->info[i].ratekbps;
303 struct ath_rc_stats *stats = &sc->debug.stats.rcstats[i]; 314 struct ath_rc_stats *stats = &sc->debug.stats.rcstats[i];
315 char mcs[5];
316 char htmode[5];
317 int used_mcs = 0, used_htmode = 0;
318
319 if (WLAN_RC_PHY_HT(sc->cur_rate_table->info[i].phy)) {
320 used_mcs = snprintf(mcs, 5, "%d",
321 sc->cur_rate_table->info[i].ratecode);
322
323 if (WLAN_RC_PHY_40(sc->cur_rate_table->info[i].phy))
324 used_htmode = snprintf(htmode, 5, "HT40");
325 else if (WLAN_RC_PHY_20(sc->cur_rate_table->info[i].phy))
326 used_htmode = snprintf(htmode, 5, "HT20");
327 else
328 used_htmode = snprintf(htmode, 5, "????");
329 }
330
331 mcs[used_mcs] = '\0';
332 htmode[used_htmode] = '\0';
304 333
305 len += snprintf(buf + len, max - len, 334 len += snprintf(buf + len, max - len,
306 "%3u.%d: %8u %8u %8u %8u\n", ratekbps / 1000, 335 "%6s %6s %3u.%d: "
307 (ratekbps % 1000) / 100, stats->success, 336 "%10u %10u %10u %10u\n",
308 stats->retries, stats->xretries, 337 htmode,
338 mcs,
339 ratekbps / 1000,
340 (ratekbps % 1000) / 100,
341 stats->success,
342 stats->retries,
343 stats->xretries,
309 stats->per); 344 stats->per);
310 } 345 }
311 346
@@ -554,6 +589,116 @@ static const struct file_operations fops_xmit = {
554 .owner = THIS_MODULE 589 .owner = THIS_MODULE
555}; 590};
556 591
592static ssize_t read_file_recv(struct file *file, char __user *user_buf,
593 size_t count, loff_t *ppos)
594{
595#define PHY_ERR(s, p) \
596 len += snprintf(buf + len, size - len, "%18s : %10u\n", s, \
597 sc->debug.stats.rxstats.phy_err_stats[p]);
598
599 struct ath_softc *sc = file->private_data;
600 char *buf;
601 unsigned int len = 0, size = 1152;
602 ssize_t retval = 0;
603
604 buf = kzalloc(size, GFP_KERNEL);
605 if (buf == NULL)
606 return 0;
607
608 len += snprintf(buf + len, size - len,
609 "%18s : %10u\n", "CRC ERR",
610 sc->debug.stats.rxstats.crc_err);
611 len += snprintf(buf + len, size - len,
612 "%18s : %10u\n", "DECRYPT CRC ERR",
613 sc->debug.stats.rxstats.decrypt_crc_err);
614 len += snprintf(buf + len, size - len,
615 "%18s : %10u\n", "PHY ERR",
616 sc->debug.stats.rxstats.phy_err);
617 len += snprintf(buf + len, size - len,
618 "%18s : %10u\n", "MIC ERR",
619 sc->debug.stats.rxstats.mic_err);
620 len += snprintf(buf + len, size - len,
621 "%18s : %10u\n", "PRE-DELIM CRC ERR",
622 sc->debug.stats.rxstats.pre_delim_crc_err);
623 len += snprintf(buf + len, size - len,
624 "%18s : %10u\n", "POST-DELIM CRC ERR",
625 sc->debug.stats.rxstats.post_delim_crc_err);
626 len += snprintf(buf + len, size - len,
627 "%18s : %10u\n", "DECRYPT BUSY ERR",
628 sc->debug.stats.rxstats.decrypt_busy_err);
629
630 PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
631 PHY_ERR("TIMING", ATH9K_PHYERR_TIMING);
632 PHY_ERR("PARITY", ATH9K_PHYERR_PARITY);
633 PHY_ERR("RATE", ATH9K_PHYERR_RATE);
634 PHY_ERR("LENGTH", ATH9K_PHYERR_LENGTH);
635 PHY_ERR("RADAR", ATH9K_PHYERR_RADAR);
636 PHY_ERR("SERVICE", ATH9K_PHYERR_SERVICE);
637 PHY_ERR("TOR", ATH9K_PHYERR_TOR);
638 PHY_ERR("OFDM-TIMING", ATH9K_PHYERR_OFDM_TIMING);
639 PHY_ERR("OFDM-SIGNAL-PARITY", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
640 PHY_ERR("OFDM-RATE", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
641 PHY_ERR("OFDM-LENGTH", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
642 PHY_ERR("OFDM-POWER-DROP", ATH9K_PHYERR_OFDM_POWER_DROP);
643 PHY_ERR("OFDM-SERVICE", ATH9K_PHYERR_OFDM_SERVICE);
644 PHY_ERR("OFDM-RESTART", ATH9K_PHYERR_OFDM_RESTART);
645 PHY_ERR("FALSE-RADAR-EXT", ATH9K_PHYERR_FALSE_RADAR_EXT);
646 PHY_ERR("CCK-TIMING", ATH9K_PHYERR_CCK_TIMING);
647 PHY_ERR("CCK-HEADER-CRC", ATH9K_PHYERR_CCK_HEADER_CRC);
648 PHY_ERR("CCK-RATE", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
649 PHY_ERR("CCK-SERVICE", ATH9K_PHYERR_CCK_SERVICE);
650 PHY_ERR("CCK-RESTART", ATH9K_PHYERR_CCK_RESTART);
651 PHY_ERR("CCK-LENGTH", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
652 PHY_ERR("CCK-POWER-DROP", ATH9K_PHYERR_CCK_POWER_DROP);
653 PHY_ERR("HT-CRC", ATH9K_PHYERR_HT_CRC_ERROR);
654 PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
655 PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL);
656
657 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
658 kfree(buf);
659
660 return retval;
661
662#undef PHY_ERR
663}
664
665void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf)
666{
667#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
668#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
669
670 struct ath_desc *ds = bf->bf_desc;
671 u32 phyerr;
672
673 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
674 RX_STAT_INC(crc_err);
675 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT)
676 RX_STAT_INC(decrypt_crc_err);
677 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC)
678 RX_STAT_INC(mic_err);
679 if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_PRE)
680 RX_STAT_INC(pre_delim_crc_err);
681 if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_POST)
682 RX_STAT_INC(post_delim_crc_err);
683 if (ds->ds_rxstat.rs_status & ATH9K_RX_DECRYPT_BUSY)
684 RX_STAT_INC(decrypt_busy_err);
685
686 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
687 RX_STAT_INC(phy_err);
688 phyerr = ds->ds_rxstat.rs_phyerr & 0x24;
689 RX_PHY_ERR_INC(phyerr);
690 }
691
692#undef RX_STAT_INC
693#undef RX_PHY_ERR_INC
694}
695
696static const struct file_operations fops_recv = {
697 .read = read_file_recv,
698 .open = ath9k_debugfs_open,
699 .owner = THIS_MODULE
700};
701
557int ath9k_init_debug(struct ath_hw *ah) 702int ath9k_init_debug(struct ath_hw *ah)
558{ 703{
559 struct ath_common *common = ath9k_hw_common(ah); 704 struct ath_common *common = ath9k_hw_common(ah);
@@ -606,6 +751,13 @@ int ath9k_init_debug(struct ath_hw *ah)
606 if (!sc->debug.debugfs_xmit) 751 if (!sc->debug.debugfs_xmit)
607 goto err; 752 goto err;
608 753
754 sc->debug.debugfs_recv = debugfs_create_file("recv",
755 S_IRUSR,
756 sc->debug.debugfs_phy,
757 sc, &fops_recv);
758 if (!sc->debug.debugfs_recv)
759 goto err;
760
609 return 0; 761 return 0;
610err: 762err:
611 ath9k_exit_debug(ah); 763 ath9k_exit_debug(ah);
@@ -617,6 +769,7 @@ void ath9k_exit_debug(struct ath_hw *ah)
617 struct ath_common *common = ath9k_hw_common(ah); 769 struct ath_common *common = ath9k_hw_common(ah);
618 struct ath_softc *sc = (struct ath_softc *) common->priv; 770 struct ath_softc *sc = (struct ath_softc *) common->priv;
619 771
772 debugfs_remove(sc->debug.debugfs_recv);
620 debugfs_remove(sc->debug.debugfs_xmit); 773 debugfs_remove(sc->debug.debugfs_xmit);
621 debugfs_remove(sc->debug.debugfs_wiphy); 774 debugfs_remove(sc->debug.debugfs_wiphy);
622 debugfs_remove(sc->debug.debugfs_rcstat); 775 debugfs_remove(sc->debug.debugfs_rcstat);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 536663e3ee11..86780e68b31e 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -116,10 +116,35 @@ struct ath_tx_stats {
116 u32 delim_underrun; 116 u32 delim_underrun;
117}; 117};
118 118
119/**
120 * struct ath_rx_stats - RX Statistics
121 * @crc_err: No. of frames with incorrect CRC value
122 * @decrypt_crc_err: No. of frames whose CRC check failed after
123 decryption process completed
124 * @phy_err: No. of frames whose reception failed because the PHY
125 encountered an error
126 * @mic_err: No. of frames with incorrect TKIP MIC verification failure
127 * @pre_delim_crc_err: Pre-Frame delimiter CRC error detections
128 * @post_delim_crc_err: Post-Frame delimiter CRC error detections
129 * @decrypt_busy_err: Decryption interruptions counter
130 * @phy_err_stats: Individual PHY error statistics
131 */
132struct ath_rx_stats {
133 u32 crc_err;
134 u32 decrypt_crc_err;
135 u32 phy_err;
136 u32 mic_err;
137 u32 pre_delim_crc_err;
138 u32 post_delim_crc_err;
139 u32 decrypt_busy_err;
140 u32 phy_err_stats[ATH9K_PHYERR_MAX];
141};
142
119struct ath_stats { 143struct ath_stats {
120 struct ath_interrupt_stats istats; 144 struct ath_interrupt_stats istats;
121 struct ath_rc_stats rcstats[RATE_TABLE_SIZE]; 145 struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
122 struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES]; 146 struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
147 struct ath_rx_stats rxstats;
123}; 148};
124 149
125struct ath9k_debug { 150struct ath9k_debug {
@@ -130,6 +155,7 @@ struct ath9k_debug {
130 struct dentry *debugfs_rcstat; 155 struct dentry *debugfs_rcstat;
131 struct dentry *debugfs_wiphy; 156 struct dentry *debugfs_wiphy;
132 struct dentry *debugfs_xmit; 157 struct dentry *debugfs_xmit;
158 struct dentry *debugfs_recv;
133 struct ath_stats stats; 159 struct ath_stats stats;
134}; 160};
135 161
@@ -142,6 +168,7 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
142void ath_debug_stat_rc(struct ath_softc *sc, int final_rate); 168void ath_debug_stat_rc(struct ath_softc *sc, int final_rate);
143void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq, 169void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
144 struct ath_buf *bf); 170 struct ath_buf *bf);
171void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf);
145void ath_debug_stat_retries(struct ath_softc *sc, int rix, 172void ath_debug_stat_retries(struct ath_softc *sc, int rix,
146 int xretries, int retries, u8 per); 173 int xretries, int retries, u8 per);
147 174
@@ -181,6 +208,11 @@ static inline void ath_debug_stat_tx(struct ath_softc *sc,
181{ 208{
182} 209}
183 210
211static inline void ath_debug_stat_rx(struct ath_softc *sc,
212 struct ath_buf *bf)
213{
214}
215
184static inline void ath_debug_stat_retries(struct ath_softc *sc, int rix, 216static inline void ath_debug_stat_retries(struct ath_softc *sc, int rix,
185 int xretries, int retries, u8 per) 217 int xretries, int retries, u8 per)
186{ 218{
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
new file mode 100644
index 000000000000..deab8beb0680
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -0,0 +1,442 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "ath9k.h"
18
19/********************************/
20/* LED functions */
21/********************************/
22
/*
 * Delayed work implementing the LED blink pattern.  Re-queues itself
 * while the interface is associated; the on/off durations are derived
 * each cycle from the tx/rx activity counters so the LED blinks faster
 * under traffic.
 */
static void ath_led_blink_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    ath_led_blink_work.work);

	/* Stop blinking as soon as we are no longer associated. */
	if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
		return;

	/* Idle duration means no recent activity: drive the pin low
	 * (LED on, active low) instead of toggling. */
	if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
	    (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
		ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
	else
		ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
				  (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);

	/* Schedule the next toggle after the current phase's duration. */
	ieee80211_queue_delayed_work(sc->hw,
				     &sc->ath_led_blink_work,
				     (sc->sc_flags & SC_OP_LED_ON) ?
					msecs_to_jiffies(sc->led_off_duration) :
					msecs_to_jiffies(sc->led_on_duration));

	/*
	 * Recompute the next on/off durations from the activity counters:
	 * more activity shortens the phase, clamped at 25ms on / 10ms off.
	 */
	sc->led_on_duration = sc->led_on_cnt ?
			max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
			ATH_LED_ON_DURATION_IDLE;
	sc->led_off_duration = sc->led_off_cnt ?
			max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
			ATH_LED_OFF_DURATION_IDLE;
	sc->led_on_cnt = sc->led_off_cnt = 0;

	/* Flip the software LED phase for the next invocation. */
	if (sc->sc_flags & SC_OP_LED_ON)
		sc->sc_flags &= ~SC_OP_LED_ON;
	else
		sc->sc_flags |= SC_OP_LED_ON;
}
56
/*
 * LED class brightness callback shared by all four LEDs.  The assoc and
 * radio LEDs drive the GPIO (and the blink work) directly; the tx/rx
 * LEDs only bump activity counters consumed by ath_led_blink_work().
 */
static void ath_led_brightness(struct led_classdev *led_cdev,
			       enum led_brightness brightness)
{
	struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
	struct ath_softc *sc = led->sc;

	switch (brightness) {
	case LED_OFF:
		if (led->led_type == ATH_LED_ASSOC ||
		    led->led_type == ATH_LED_RADIO) {
			/* LED is active low: radio-off drives the pin high
			 * (LED off); assoc-off drives it low (LED on). */
			ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
					  (led->led_type == ATH_LED_RADIO));
			sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
			if (led->led_type == ATH_LED_RADIO)
				sc->sc_flags &= ~SC_OP_LED_ON;
		} else {
			sc->led_off_cnt++;
		}
		break;
	case LED_FULL:
		if (led->led_type == ATH_LED_ASSOC) {
			sc->sc_flags |= SC_OP_LED_ASSOCIATED;
			/* Kick the blink cycle off immediately. */
			ieee80211_queue_delayed_work(sc->hw,
						     &sc->ath_led_blink_work, 0);
		} else if (led->led_type == ATH_LED_RADIO) {
			ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
			sc->sc_flags |= SC_OP_LED_ON;
		} else {
			sc->led_on_cnt++;
		}
		break;
	default:
		break;
	}
}
92
93static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
94 char *trigger)
95{
96 int ret;
97
98 led->sc = sc;
99 led->led_cdev.name = led->name;
100 led->led_cdev.default_trigger = trigger;
101 led->led_cdev.brightness_set = ath_led_brightness;
102
103 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
104 if (ret)
105 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
106 "Failed to register led:%s", led->name);
107 else
108 led->registered = 1;
109 return ret;
110}
111
112static void ath_unregister_led(struct ath_led *led)
113{
114 if (led->registered) {
115 led_classdev_unregister(&led->led_cdev);
116 led->registered = 0;
117 }
118}
119
/*
 * Tear down every LED registered by ath_init_leds().  The association
 * flag is cleared right after the assoc LED goes away so the blink work
 * bails out, and the GPIO is finally driven high (LED off, active low).
 */
void ath_deinit_leds(struct ath_softc *sc)
{
	ath_unregister_led(&sc->assoc_led);
	sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
	ath_unregister_led(&sc->tx_led);
	ath_unregister_led(&sc->rx_led);
	ath_unregister_led(&sc->radio_led);
	ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
}
129
130void ath_init_leds(struct ath_softc *sc)
131{
132 char *trigger;
133 int ret;
134
135 if (AR_SREV_9287(sc->sc_ah))
136 sc->sc_ah->led_pin = ATH_LED_PIN_9287;
137 else
138 sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
139
140 /* Configure gpio 1 for output */
141 ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
142 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
143 /* LED off, active low */
144 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
145
146 INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
147
148 trigger = ieee80211_get_radio_led_name(sc->hw);
149 snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
150 "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
151 ret = ath_register_led(sc, &sc->radio_led, trigger);
152 sc->radio_led.led_type = ATH_LED_RADIO;
153 if (ret)
154 goto fail;
155
156 trigger = ieee80211_get_assoc_led_name(sc->hw);
157 snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
158 "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
159 ret = ath_register_led(sc, &sc->assoc_led, trigger);
160 sc->assoc_led.led_type = ATH_LED_ASSOC;
161 if (ret)
162 goto fail;
163
164 trigger = ieee80211_get_tx_led_name(sc->hw);
165 snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
166 "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
167 ret = ath_register_led(sc, &sc->tx_led, trigger);
168 sc->tx_led.led_type = ATH_LED_TX;
169 if (ret)
170 goto fail;
171
172 trigger = ieee80211_get_rx_led_name(sc->hw);
173 snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
174 "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
175 ret = ath_register_led(sc, &sc->rx_led, trigger);
176 sc->rx_led.led_type = ATH_LED_RX;
177 if (ret)
178 goto fail;
179
180 return;
181
182fail:
183 cancel_delayed_work_sync(&sc->ath_led_blink_work);
184 ath_deinit_leds(sc);
185}
186
187/*******************/
188/* Rfkill */
189/*******************/
190
191static bool ath_is_rfkill_set(struct ath_softc *sc)
192{
193 struct ath_hw *ah = sc->sc_ah;
194
195 return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
196 ah->rfkill_polarity;
197}
198
199void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
200{
201 struct ath_wiphy *aphy = hw->priv;
202 struct ath_softc *sc = aphy->sc;
203 bool blocked = !!ath_is_rfkill_set(sc);
204
205 wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
206}
207
208void ath_start_rfkill_poll(struct ath_softc *sc)
209{
210 struct ath_hw *ah = sc->sc_ah;
211
212 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
213 wiphy_rfkill_start_polling(sc->hw->wiphy);
214}
215
216/******************/
217/* BTCOEX */
218/******************/
219
220/*
221 * Detects if there is any priority bt traffic
222 */
static void ath_detect_bt_priority(struct ath_softc *sc)
{
	struct ath_btcoex *btcoex = &sc->btcoex;
	struct ath_hw *ah = sc->sc_ah;

	/* Sample the BT priority GPIO once per call and accumulate. */
	if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
		btcoex->bt_priority_cnt++;

	/* Evaluate the accumulated samples once per threshold window. */
	if (time_after(jiffies, btcoex->bt_priority_time +
			msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
		sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);
		/* Detect if colocated bt started scanning */
		if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
			ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
				  "BT scan detected");
			sc->sc_flags |= (SC_OP_BT_SCAN |
					 SC_OP_BT_PRIORITY_DETECTED);
		} else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
			ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
				  "BT priority traffic detected");
			sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
		}

		/* Restart the sampling window. */
		btcoex->bt_priority_cnt = 0;
		btcoex->bt_priority_time = jiffies;
	}
}
250
251/*
252 * Configures appropriate weight based on stomp type.
253 */
/*
 * Program the BT coex weight registers for the requested stomp type
 * and (re-)enable coexistence.  NOTE(review): callers appear to hold
 * btcoex->btcoex_lock around this — confirm before adding new callers.
 */
static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
				  enum ath_stomp_type stomp_type)
{
	struct ath_hw *ah = sc->sc_ah;

	switch (stomp_type) {
	case ATH_BTCOEX_STOMP_ALL:
		ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
					   AR_STOMP_ALL_WLAN_WGHT);
		break;
	case ATH_BTCOEX_STOMP_LOW:
		ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
					   AR_STOMP_LOW_WLAN_WGHT);
		break;
	case ATH_BTCOEX_STOMP_NONE:
		ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
					   AR_STOMP_NONE_WLAN_WGHT);
		break;
	default:
		ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
			  "Invalid Stomptype\n");
		break;
	}

	/* Weights take effect once coex is (re-)enabled. */
	ath9k_hw_btcoex_enable(ah);
}
280
/*
 * Start a TSF-based generic hardware timer and make sure the GENTIMER
 * interrupt is part of the interrupt mask.  Interrupts are disabled
 * while imask is updated and then re-enabled with the new mask.
 */
static void ath9k_gen_timer_start(struct ath_hw *ah,
				  struct ath_gen_timer *timer,
				  u32 timer_next,
				  u32 timer_period)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;

	ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);

	if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
		ath9k_hw_set_interrupts(ah, 0);
		sc->imask |= ATH9K_INT_GENTIMER;
		ath9k_hw_set_interrupts(ah, sc->imask);
	}
}
297
/*
 * Stop a generic hardware timer.  When the last active timer goes away
 * (timer_mask becomes zero) the GENTIMER interrupt is removed from the
 * mask, again by disabling and re-enabling interrupts around the update.
 */
static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;

	ath9k_hw_gen_timer_stop(ah, timer);

	/* if no timer is enabled, turn off interrupt mask */
	if (timer_table->timer_mask.val == 0) {
		ath9k_hw_set_interrupts(ah, 0);
		sc->imask &= ~ATH9K_INT_GENTIMER;
		ath9k_hw_set_interrupts(ah, sc->imask);
	}
}
313
314/*
315 * This is the master bt coex timer which runs for every
316 * 45ms, bt traffic will be given priority during 55% of this
317 * period while wlan gets remaining 45%
318 */
static void ath_btcoex_period_timer(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *) data;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_btcoex *btcoex = &sc->btcoex;
	u32 timer_period;
	bool is_btscan;

	/* Refresh the BT-scan / BT-priority flags before deciding weights. */
	ath_detect_bt_priority(sc);

	is_btscan = sc->sc_flags & SC_OP_BT_SCAN;

	/* A BT scan forces full stomping regardless of the configured type. */
	spin_lock_bh(&btcoex->btcoex_lock);

	ath9k_btcoex_bt_stomp(sc, is_btscan ? ATH_BTCOEX_STOMP_ALL :
			      btcoex->bt_stomp_type);

	spin_unlock_bh(&btcoex->btcoex_lock);

	/* Arm the hw duty-cycle timer that flips to the no-stomp weights
	 * partway through the period (unless the duty cycle is 100%). */
	if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
		if (btcoex->hw_timer_enabled)
			ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);

		timer_period = is_btscan ? btcoex->btscan_no_stomp :
					   btcoex->btcoex_no_stomp;
		ath9k_gen_timer_start(ah,
				      btcoex->no_stomp_timer,
				      (ath9k_hw_gettsf32(ah) +
				       timer_period), timer_period * 10);
		btcoex->hw_timer_enabled = true;
	}

	/* Re-arm ourselves for the next coex period. */
	mod_timer(&btcoex->period_timer, jiffies +
				  msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
}
354
355/*
356 * Generic tsf based hw timer which configures weight
357 * registers to time slice between wlan and bt traffic
358 */
static void ath_btcoex_no_stomp_timer(void *arg)
{
	struct ath_softc *sc = (struct ath_softc *)arg;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_btcoex *btcoex = &sc->btcoex;
	bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;

	ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
		  "no stomp timer running \n");

	/* Mid-period: relax stomping one level so WLAN gets its share
	 * (LOW -> NONE, ALL -> LOW; a BT scan always drops to NONE). */
	spin_lock_bh(&btcoex->btcoex_lock);

	if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
		ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE);
	 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
		ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW);

	spin_unlock_bh(&btcoex->btcoex_lock);
}
378
/*
 * One-time setup of the btcoex machinery: computes the period and
 * no-stomp slice lengths (in microseconds, from the ms-based period
 * and duty-cycle percentages), initializes the software period timer
 * and lock, and allocates the TSF-based no-stomp hardware timer.
 *
 * Returns 0 on success, -ENOMEM if the hw timer cannot be allocated.
 */
int ath_init_btcoex_timer(struct ath_softc *sc)
{
	struct ath_btcoex *btcoex = &sc->btcoex;

	btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
	btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
		btcoex->btcoex_period / 100;
	btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
				   btcoex->btcoex_period / 100;

	setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
			(unsigned long) sc);

	spin_lock_init(&btcoex->btcoex_lock);

	/* Same handler for the normal and overflow trigger of the timer. */
	btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
			ath_btcoex_no_stomp_timer,
			ath_btcoex_no_stomp_timer,
			(void *) sc, AR_FIRST_NDP_TIMER);

	if (!btcoex->no_stomp_timer)
		return -ENOMEM;

	return 0;
}
404
405/*
406 * (Re)start btcoex timers
407 */
/*
 * (Re)start btcoex timers: resets the BT-priority sampling state and
 * fires the period timer immediately.  The hw duty-cycle timer, if it
 * was left running, is stopped first so the period timer re-arms it
 * cleanly.
 */
void ath9k_btcoex_timer_resume(struct ath_softc *sc)
{
	struct ath_btcoex *btcoex = &sc->btcoex;
	struct ath_hw *ah = sc->sc_ah;

	ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
		  "Starting btcoex timers");

	/* make sure duty cycle timer is also stopped when resuming */
	if (btcoex->hw_timer_enabled)
		ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);

	btcoex->bt_priority_cnt = 0;
	btcoex->bt_priority_time = jiffies;
	sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);

	/* Run the period timer right away. */
	mod_timer(&btcoex->period_timer, jiffies);
}
426
427
428/*
429 * Pause btcoex timer and bt duty cycle timer
430 */
431void ath9k_btcoex_timer_pause(struct ath_softc *sc)
432{
433 struct ath_btcoex *btcoex = &sc->btcoex;
434 struct ath_hw *ah = sc->sc_ah;
435
436 del_timer_sync(&btcoex->period_timer);
437
438 if (btcoex->hw_timer_enabled)
439 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
440
441 btcoex->hw_timer_enabled = false;
442}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index ae371448b5a0..2e767cf22f1e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -52,28 +52,6 @@ module_exit(ath9k_exit);
52/* Helper Functions */ 52/* Helper Functions */
53/********************/ 53/********************/
54 54
55static u32 ath9k_hw_mac_usec(struct ath_hw *ah, u32 clks)
56{
57 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
58
59 if (!ah->curchan) /* should really check for CCK instead */
60 return clks / ATH9K_CLOCK_RATE_CCK;
61 if (conf->channel->band == IEEE80211_BAND_2GHZ)
62 return clks / ATH9K_CLOCK_RATE_2GHZ_OFDM;
63
64 return clks / ATH9K_CLOCK_RATE_5GHZ_OFDM;
65}
66
67static u32 ath9k_hw_mac_to_usec(struct ath_hw *ah, u32 clks)
68{
69 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
70
71 if (conf_is_ht40(conf))
72 return ath9k_hw_mac_usec(ah, clks) / 2;
73 else
74 return ath9k_hw_mac_usec(ah, clks);
75}
76
77static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs) 55static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
78{ 56{
79 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; 57 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
@@ -343,30 +321,6 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
343 return true; 321 return true;
344} 322}
345 323
346static const char *ath9k_hw_devname(u16 devid)
347{
348 switch (devid) {
349 case AR5416_DEVID_PCI:
350 return "Atheros 5416";
351 case AR5416_DEVID_PCIE:
352 return "Atheros 5418";
353 case AR9160_DEVID_PCI:
354 return "Atheros 9160";
355 case AR5416_AR9100_DEVID:
356 return "Atheros 9100";
357 case AR9280_DEVID_PCI:
358 case AR9280_DEVID_PCIE:
359 return "Atheros 9280";
360 case AR9285_DEVID_PCIE:
361 return "Atheros 9285";
362 case AR5416_DEVID_AR9287_PCI:
363 case AR5416_DEVID_AR9287_PCIE:
364 return "Atheros 9287";
365 }
366
367 return NULL;
368}
369
370static void ath9k_hw_init_config(struct ath_hw *ah) 324static void ath9k_hw_init_config(struct ath_hw *ah)
371{ 325{
372 int i; 326 int i;
@@ -380,7 +334,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
380 ah->config.pcie_clock_req = 0; 334 ah->config.pcie_clock_req = 0;
381 ah->config.pcie_waen = 0; 335 ah->config.pcie_waen = 0;
382 ah->config.analog_shiftreg = 1; 336 ah->config.analog_shiftreg = 1;
383 ah->config.ht_enable = 1;
384 ah->config.ofdm_trig_low = 200; 337 ah->config.ofdm_trig_low = 200;
385 ah->config.ofdm_trig_high = 500; 338 ah->config.ofdm_trig_high = 500;
386 ah->config.cck_trig_high = 200; 339 ah->config.cck_trig_high = 200;
@@ -392,7 +345,12 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
392 ah->config.spurchans[i][1] = AR_NO_SPUR; 345 ah->config.spurchans[i][1] = AR_NO_SPUR;
393 } 346 }
394 347
395 ah->config.intr_mitigation = true; 348 if (ah->hw_version.devid != AR2427_DEVID_PCIE)
349 ah->config.ht_enable = 1;
350 else
351 ah->config.ht_enable = 0;
352
353 ah->config.rx_intr_mitigation = true;
396 354
397 /* 355 /*
398 * We need this for PCI devices only (Cardbus, PCI, miniPCI) 356 * We need this for PCI devices only (Cardbus, PCI, miniPCI)
@@ -437,8 +395,6 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
437 ah->beacon_interval = 100; 395 ah->beacon_interval = 100;
438 ah->enable_32kHz_clock = DONT_USE_32KHZ; 396 ah->enable_32kHz_clock = DONT_USE_32KHZ;
439 ah->slottime = (u32) -1; 397 ah->slottime = (u32) -1;
440 ah->acktimeout = (u32) -1;
441 ah->ctstimeout = (u32) -1;
442 ah->globaltxtimeout = (u32) -1; 398 ah->globaltxtimeout = (u32) -1;
443 ah->power_mode = ATH9K_PM_UNDEFINED; 399 ah->power_mode = ATH9K_PM_UNDEFINED;
444} 400}
@@ -590,6 +546,7 @@ static bool ath9k_hw_devid_supported(u16 devid)
590 case AR5416_DEVID_AR9287_PCI: 546 case AR5416_DEVID_AR9287_PCI:
591 case AR5416_DEVID_AR9287_PCIE: 547 case AR5416_DEVID_AR9287_PCIE:
592 case AR9271_USB: 548 case AR9271_USB:
549 case AR2427_DEVID_PCIE:
593 return true; 550 return true;
594 default: 551 default:
595 break; 552 break;
@@ -1183,7 +1140,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
1183 AR_IMR_RXORN | 1140 AR_IMR_RXORN |
1184 AR_IMR_BCNMISC; 1141 AR_IMR_BCNMISC;
1185 1142
1186 if (ah->config.intr_mitigation) 1143 if (ah->config.rx_intr_mitigation)
1187 ah->mask_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR; 1144 ah->mask_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
1188 else 1145 else
1189 ah->mask_reg |= AR_IMR_RXOK; 1146 ah->mask_reg |= AR_IMR_RXOK;
@@ -1203,34 +1160,25 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
1203 } 1160 }
1204} 1161}
1205 1162
1206static bool ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us) 1163static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
1207{ 1164{
1208 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) { 1165 u32 val = ath9k_hw_mac_to_clks(ah, us);
1209 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, 1166 val = min(val, (u32) 0xFFFF);
1210 "bad ack timeout %u\n", us); 1167 REG_WRITE(ah, AR_D_GBL_IFS_SLOT, val);
1211 ah->acktimeout = (u32) -1;
1212 return false;
1213 } else {
1214 REG_RMW_FIELD(ah, AR_TIME_OUT,
1215 AR_TIME_OUT_ACK, ath9k_hw_mac_to_clks(ah, us));
1216 ah->acktimeout = us;
1217 return true;
1218 }
1219} 1168}
1220 1169
1221static bool ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us) 1170static void ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
1222{ 1171{
1223 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) { 1172 u32 val = ath9k_hw_mac_to_clks(ah, us);
1224 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, 1173 val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_ACK));
1225 "bad cts timeout %u\n", us); 1174 REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_ACK, val);
1226 ah->ctstimeout = (u32) -1; 1175}
1227 return false; 1176
1228 } else { 1177static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
1229 REG_RMW_FIELD(ah, AR_TIME_OUT, 1178{
1230 AR_TIME_OUT_CTS, ath9k_hw_mac_to_clks(ah, us)); 1179 u32 val = ath9k_hw_mac_to_clks(ah, us);
1231 ah->ctstimeout = us; 1180 val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_CTS));
1232 return true; 1181 REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_CTS, val);
1233 }
1234} 1182}
1235 1183
1236static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu) 1184static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
@@ -1247,31 +1195,48 @@ static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
1247 } 1195 }
1248} 1196}
1249 1197
1250static void ath9k_hw_init_user_settings(struct ath_hw *ah) 1198void ath9k_hw_init_global_settings(struct ath_hw *ah)
1251{ 1199{
1200 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
1201 int acktimeout;
1202 int slottime;
1203 int sifstime;
1204
1252 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n", 1205 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n",
1253 ah->misc_mode); 1206 ah->misc_mode);
1254 1207
1255 if (ah->misc_mode != 0) 1208 if (ah->misc_mode != 0)
1256 REG_WRITE(ah, AR_PCU_MISC, 1209 REG_WRITE(ah, AR_PCU_MISC,
1257 REG_READ(ah, AR_PCU_MISC) | ah->misc_mode); 1210 REG_READ(ah, AR_PCU_MISC) | ah->misc_mode);
1258 if (ah->slottime != (u32) -1) 1211
1259 ath9k_hw_setslottime(ah, ah->slottime); 1212 if (conf->channel && conf->channel->band == IEEE80211_BAND_5GHZ)
1260 if (ah->acktimeout != (u32) -1) 1213 sifstime = 16;
1261 ath9k_hw_set_ack_timeout(ah, ah->acktimeout); 1214 else
1262 if (ah->ctstimeout != (u32) -1) 1215 sifstime = 10;
1263 ath9k_hw_set_cts_timeout(ah, ah->ctstimeout); 1216
1217 /* As defined by IEEE 802.11-2007 17.3.8.6 */
1218 slottime = ah->slottime + 3 * ah->coverage_class;
1219 acktimeout = slottime + sifstime;
1220
1221 /*
1222 * Workaround for early ACK timeouts, add an offset to match the
1223 * initval's 64us ack timeout value.
1224 * This was initially only meant to work around an issue with delayed
1225 * BA frames in some implementations, but it has been found to fix ACK
1226 * timeout issues in other cases as well.
1227 */
1228 if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ)
1229 acktimeout += 64 - sifstime - ah->slottime;
1230
1231 ath9k_hw_setslottime(ah, slottime);
1232 ath9k_hw_set_ack_timeout(ah, acktimeout);
1233 ath9k_hw_set_cts_timeout(ah, acktimeout);
1264 if (ah->globaltxtimeout != (u32) -1) 1234 if (ah->globaltxtimeout != (u32) -1)
1265 ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout); 1235 ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout);
1266} 1236}
1237EXPORT_SYMBOL(ath9k_hw_init_global_settings);
1267 1238
1268const char *ath9k_hw_probe(u16 vendorid, u16 devid) 1239void ath9k_hw_deinit(struct ath_hw *ah)
1269{
1270 return vendorid == ATHEROS_VENDOR_ID ?
1271 ath9k_hw_devname(devid) : NULL;
1272}
1273
1274void ath9k_hw_detach(struct ath_hw *ah)
1275{ 1240{
1276 struct ath_common *common = ath9k_hw_common(ah); 1241 struct ath_common *common = ath9k_hw_common(ah);
1277 1242
@@ -1289,7 +1254,7 @@ free_hw:
1289 kfree(ah); 1254 kfree(ah);
1290 ah = NULL; 1255 ah = NULL;
1291} 1256}
1292EXPORT_SYMBOL(ath9k_hw_detach); 1257EXPORT_SYMBOL(ath9k_hw_deinit);
1293 1258
1294/*******/ 1259/*******/
1295/* INI */ 1260/* INI */
@@ -1345,6 +1310,16 @@ static void ath9k_hw_override_ini(struct ath_hw *ah,
1345 * Necessary to avoid issues on AR5416 2.0 1310 * Necessary to avoid issues on AR5416 2.0
1346 */ 1311 */
1347 REG_WRITE(ah, 0x9800 + (651 << 2), 0x11); 1312 REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
1313
1314 /*
1315 * Disable RIFS search on some chips to avoid baseband
1316 * hang issues.
1317 */
1318 if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
1319 val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS);
1320 val &= ~AR_PHY_RIFS_INIT_DELAY;
1321 REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val);
1322 }
1348} 1323}
1349 1324
1350static u32 ath9k_hw_def_ini_fixup(struct ath_hw *ah, 1325static u32 ath9k_hw_def_ini_fixup(struct ath_hw *ah,
@@ -2090,7 +2065,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2090 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT) 2065 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
2091 ath9k_enable_rfkill(ah); 2066 ath9k_enable_rfkill(ah);
2092 2067
2093 ath9k_hw_init_user_settings(ah); 2068 ath9k_hw_init_global_settings(ah);
2094 2069
2095 if (AR_SREV_9287_12_OR_LATER(ah)) { 2070 if (AR_SREV_9287_12_OR_LATER(ah)) {
2096 REG_WRITE(ah, AR_D_GBL_IFS_SIFS, 2071 REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
@@ -2120,7 +2095,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2120 2095
2121 REG_WRITE(ah, AR_OBS, 8); 2096 REG_WRITE(ah, AR_OBS, 8);
2122 2097
2123 if (ah->config.intr_mitigation) { 2098 if (ah->config.rx_intr_mitigation) {
2124 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500); 2099 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
2125 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000); 2100 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
2126 } 2101 }
@@ -2780,7 +2755,7 @@ bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
2780 2755
2781 *masked = isr & ATH9K_INT_COMMON; 2756 *masked = isr & ATH9K_INT_COMMON;
2782 2757
2783 if (ah->config.intr_mitigation) { 2758 if (ah->config.rx_intr_mitigation) {
2784 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM)) 2759 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
2785 *masked |= ATH9K_INT_RX; 2760 *masked |= ATH9K_INT_RX;
2786 } 2761 }
@@ -2913,7 +2888,7 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
2913 } 2888 }
2914 if (ints & ATH9K_INT_RX) { 2889 if (ints & ATH9K_INT_RX) {
2915 mask |= AR_IMR_RXERR; 2890 mask |= AR_IMR_RXERR;
2916 if (ah->config.intr_mitigation) 2891 if (ah->config.rx_intr_mitigation)
2917 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM; 2892 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
2918 else 2893 else
2919 mask |= AR_IMR_RXOK | AR_IMR_RXDESC; 2894 mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
@@ -3687,21 +3662,6 @@ u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp)
3687} 3662}
3688EXPORT_SYMBOL(ath9k_hw_extend_tsf); 3663EXPORT_SYMBOL(ath9k_hw_extend_tsf);
3689 3664
3690bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
3691{
3692 if (us < ATH9K_SLOT_TIME_9 || us > ath9k_hw_mac_to_usec(ah, 0xffff)) {
3693 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
3694 "bad slot time %u\n", us);
3695 ah->slottime = (u32) -1;
3696 return false;
3697 } else {
3698 REG_WRITE(ah, AR_D_GBL_IFS_SLOT, ath9k_hw_mac_to_clks(ah, us));
3699 ah->slottime = us;
3700 return true;
3701 }
3702}
3703EXPORT_SYMBOL(ath9k_hw_setslottime);
3704
3705void ath9k_hw_set11nmac2040(struct ath_hw *ah) 3665void ath9k_hw_set11nmac2040(struct ath_hw *ah)
3706{ 3666{
3707 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; 3667 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index e2b0c73a616f..dbbf7ca5f97d 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -40,6 +40,7 @@
40#define AR9280_DEVID_PCI 0x0029 40#define AR9280_DEVID_PCI 0x0029
41#define AR9280_DEVID_PCIE 0x002a 41#define AR9280_DEVID_PCIE 0x002a
42#define AR9285_DEVID_PCIE 0x002b 42#define AR9285_DEVID_PCIE 0x002b
43#define AR2427_DEVID_PCIE 0x002c
43 44
44#define AR5416_AR9100_DEVID 0x000b 45#define AR5416_AR9100_DEVID 0x000b
45 46
@@ -212,7 +213,7 @@ struct ath9k_ops_config {
212 u32 cck_trig_low; 213 u32 cck_trig_low;
213 u32 enable_ani; 214 u32 enable_ani;
214 int serialize_regmode; 215 int serialize_regmode;
215 bool intr_mitigation; 216 bool rx_intr_mitigation;
216#define SPUR_DISABLE 0 217#define SPUR_DISABLE 0
217#define SPUR_ENABLE_IOCTL 1 218#define SPUR_ENABLE_IOCTL 1
218#define SPUR_ENABLE_EEPROM 2 219#define SPUR_ENABLE_EEPROM 2
@@ -551,10 +552,9 @@ struct ath_hw {
551 u32 *bank6Temp; 552 u32 *bank6Temp;
552 553
553 int16_t txpower_indexoffset; 554 int16_t txpower_indexoffset;
555 int coverage_class;
554 u32 beacon_interval; 556 u32 beacon_interval;
555 u32 slottime; 557 u32 slottime;
556 u32 acktimeout;
557 u32 ctstimeout;
558 u32 globaltxtimeout; 558 u32 globaltxtimeout;
559 559
560 /* ANI */ 560 /* ANI */
@@ -616,7 +616,7 @@ static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah)
616 616
617/* Initialization, Detach, Reset */ 617/* Initialization, Detach, Reset */
618const char *ath9k_hw_probe(u16 vendorid, u16 devid); 618const char *ath9k_hw_probe(u16 vendorid, u16 devid);
619void ath9k_hw_detach(struct ath_hw *ah); 619void ath9k_hw_deinit(struct ath_hw *ah);
620int ath9k_hw_init(struct ath_hw *ah); 620int ath9k_hw_init(struct ath_hw *ah);
621int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, 621int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
622 bool bChannelChange); 622 bool bChannelChange);
@@ -668,7 +668,7 @@ void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
668void ath9k_hw_reset_tsf(struct ath_hw *ah); 668void ath9k_hw_reset_tsf(struct ath_hw *ah);
669void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting); 669void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
670u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp); 670u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp);
671bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us); 671void ath9k_hw_init_global_settings(struct ath_hw *ah);
672void ath9k_hw_set11nmac2040(struct ath_hw *ah); 672void ath9k_hw_set11nmac2040(struct ath_hw *ah);
673void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period); 673void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
674void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah, 674void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
new file mode 100644
index 000000000000..623c2f884987
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -0,0 +1,863 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
#include "ath9k.h"

/* Driver name printed at module unload time. */
static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

/* "debug" parameter: initial ath_common debug mask (copied into
 * common->debug_mask in ath9k_init_softc()); not runtime-writable. */
static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

/* "nohwcrypt" parameter: non-zero disables hardware encryption.
 * Non-static: also consulted by ath9k_set_hw_capab() for MFP support. */
int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
33
/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx) { \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

/* Some 2 GHz radios are actually tunable on 2312-2732
 * on 5 MHz steps, we support the channels which we know
 * we have calibration data for all cards though to make
 * this static */
static struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};

/* Some 5 GHz radios are actually tunable on XXXX-YYYY
 * on 5 MHz steps, we support the channels which we know
 * we have calibration data for all cards though to make
 * this static */
static struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
	CHAN5G(5745, 33), /* Channel 149 */
	CHAN5G(5765, 34), /* Channel 153 */
	CHAN5G(5785, 35), /* Channel 157 */
	CHAN5G(5805, 36), /* Channel 161 */
	CHAN5G(5825, 37), /* Channel 165 */
};
104
/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
	.bitrate = (_bitrate), \
	.flags = (_flags), \
	.hw_value = (_hw_rate), \
	.hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}

/* Legacy (non-HT) rate table; bitrate values follow the mac80211
 * convention (units of 100 kb/s).  The first four entries are the
 * CCK rates — ath9k_init_channels_rates() skips them ("+ 4") when
 * registering the 5 GHz band. */
static struct ieee80211_rate ath9k_legacy_rates[] = {
	RATE(10, 0x1b, 0),
	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0x0b, 0),
	RATE(90, 0x0f, 0),
	RATE(120, 0x0a, 0),
	RATE(180, 0x0e, 0),
	RATE(240, 0x09, 0),
	RATE(360, 0x0d, 0),
	RATE(480, 0x08, 0),
	RATE(540, 0x0c, 0),
};
130
/* Forward declaration: teardown helper shared by the init error paths
 * and ath9k_deinit_device(); defined later in this file. */
static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Read and write, they both share the same lock. We do this to serialize
 * reads and writes on Atheros 802.11n PCI devices only. This is required
 * as the FIFO on these devices can only accept sanely 2 requests.
 */
138
139static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
140{
141 struct ath_hw *ah = (struct ath_hw *) hw_priv;
142 struct ath_common *common = ath9k_hw_common(ah);
143 struct ath_softc *sc = (struct ath_softc *) common->priv;
144
145 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
146 unsigned long flags;
147 spin_lock_irqsave(&sc->sc_serial_rw, flags);
148 iowrite32(val, sc->mem + reg_offset);
149 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
150 } else
151 iowrite32(val, sc->mem + reg_offset);
152}
153
154static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
155{
156 struct ath_hw *ah = (struct ath_hw *) hw_priv;
157 struct ath_common *common = ath9k_hw_common(ah);
158 struct ath_softc *sc = (struct ath_softc *) common->priv;
159 u32 val;
160
161 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
162 unsigned long flags;
163 spin_lock_irqsave(&sc->sc_serial_rw, flags);
164 val = ioread32(sc->mem + reg_offset);
165 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
166 } else
167 val = ioread32(sc->mem + reg_offset);
168 return val;
169}
170
/* Register accessors handed to the shared ath layer (common->ops);
 * they serialize MMIO only when the chip configuration requires it. */
static const struct ath_ops ath9k_common_ops = {
	.read = ath9k_ioread32,
	.write = ath9k_iowrite32,
};
175
176/**************************/
177/* Initialization */
178/**************************/
179
180static void setup_ht_cap(struct ath_softc *sc,
181 struct ieee80211_sta_ht_cap *ht_info)
182{
183 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
184 u8 tx_streams, rx_streams;
185
186 ht_info->ht_supported = true;
187 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
188 IEEE80211_HT_CAP_SM_PS |
189 IEEE80211_HT_CAP_SGI_40 |
190 IEEE80211_HT_CAP_DSSSCCK40;
191
192 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
193 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
194
195 /* set up supported mcs set */
196 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
197 tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
198 1 : 2;
199 rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
200 1 : 2;
201
202 if (tx_streams != rx_streams) {
203 ath_print(common, ATH_DBG_CONFIG,
204 "TX streams %d, RX streams: %d\n",
205 tx_streams, rx_streams);
206 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
207 ht_info->mcs.tx_params |= ((tx_streams - 1) <<
208 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
209 }
210
211 ht_info->mcs.rx_mask[0] = 0xff;
212 if (rx_streams >= 2)
213 ht_info->mcs.rx_mask[1] = 0xff;
214
215 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
216}
217
218static int ath9k_reg_notifier(struct wiphy *wiphy,
219 struct regulatory_request *request)
220{
221 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
222 struct ath_wiphy *aphy = hw->priv;
223 struct ath_softc *sc = aphy->sc;
224 struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
225
226 return ath_reg_notifier_apply(wiphy, request, reg);
227}
228
/*
 * Allocate a DMA descriptor ring plus the ath_buf array that tracks it.
 *
 * @dd:    descriptor-set bookkeeping to fill in (zeroed on failure)
 * @head:  list that receives every allocated ath_buf
 * @name:  ring name used in debug output only
 * @nbuf:  number of buffers
 * @ndesc: descriptors per buffer
 *
 * Returns 0 on success or a negative errno.  On hardware without
 * ATH9K_HW_CAP_4KB_SPLITTRANS, descriptors that would straddle a 4 KB
 * boundary are skipped, so extra memory is allocated up front to cover
 * the skipped slots.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc)
{
#define DS2PHYS(_dd, _ds)						\
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int i, bsize, error;

	ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		  name, nbuf, ndesc);

	INIT_LIST_HEAD(head);
	/* ath_desc must be a multiple of DWORDs */
	if ((sizeof(struct ath_desc) % 4) != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "ath_desc not DWORD aligned\n");
		/* Same (always-true here) condition: this panics, so the
		 * error return below is effectively unreachable. */
		BUG_ON((sizeof(struct ath_desc) % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		/* Iterate: the extra memory itself spans pages, which can
		 * introduce further skipped descriptors. */
		while (ndesc_skipped) {
			dma_len = ndesc_skipped * sizeof(struct ath_desc);
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		};
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = dd->dd_desc;
	ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		  name, ds, (u32) dd->dd_desc_len,
		  ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	/* Point each ath_buf at its slice of the ring and record the
	 * matching bus address. */
	for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += ndesc;
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}
335
336static void ath9k_init_crypto(struct ath_softc *sc)
337{
338 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
339 int i = 0;
340
341 /* Get the hardware key cache size. */
342 common->keymax = sc->sc_ah->caps.keycache_size;
343 if (common->keymax > ATH_KEYMAX) {
344 ath_print(common, ATH_DBG_ANY,
345 "Warning, using only %u entries in %u key cache\n",
346 ATH_KEYMAX, common->keymax);
347 common->keymax = ATH_KEYMAX;
348 }
349
350 /*
351 * Reset the key cache since some parts do not
352 * reset the contents on initial power up.
353 */
354 for (i = 0; i < common->keymax; i++)
355 ath9k_hw_keyreset(sc->sc_ah, (u16) i);
356
357 if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
358 ATH9K_CIPHER_TKIP, NULL)) {
359 /*
360 * Whether we should enable h/w TKIP MIC.
361 * XXX: if we don't support WME TKIP MIC, then we wouldn't
362 * report WMM capable, so it's always safe to turn on
363 * TKIP MIC in this case.
364 */
365 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
366 }
367
368 /*
369 * Check whether the separate key cache entries
370 * are required to handle both tx+rx MIC keys.
371 * With split mic keys the number of stations is limited
372 * to 27 otherwise 59.
373 */
374 if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
375 ATH9K_CIPHER_TKIP, NULL)
376 && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
377 ATH9K_CIPHER_MIC, NULL)
378 && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_TKIP_SPLIT,
379 0, NULL))
380 common->splitmic = 1;
381
382 /* turn on mcast key search if possible */
383 if (!ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
384 (void)ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH,
385 1, 1, NULL);
386
387}
388
389static int ath9k_init_btcoex(struct ath_softc *sc)
390{
391 int r, qnum;
392
393 switch (sc->sc_ah->btcoex_hw.scheme) {
394 case ATH_BTCOEX_CFG_NONE:
395 break;
396 case ATH_BTCOEX_CFG_2WIRE:
397 ath9k_hw_btcoex_init_2wire(sc->sc_ah);
398 break;
399 case ATH_BTCOEX_CFG_3WIRE:
400 ath9k_hw_btcoex_init_3wire(sc->sc_ah);
401 r = ath_init_btcoex_timer(sc);
402 if (r)
403 return -1;
404 qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
405 ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
406 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
407 break;
408 default:
409 WARN_ON(1);
410 break;
411 }
412
413 return 0;
414}
415
416static int ath9k_init_queues(struct ath_softc *sc)
417{
418 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
419 int i = 0;
420
421 for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
422 sc->tx.hwq_map[i] = -1;
423
424 sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
425 if (sc->beacon.beaconq == -1) {
426 ath_print(common, ATH_DBG_FATAL,
427 "Unable to setup a beacon xmit queue\n");
428 goto err;
429 }
430
431 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
432 if (sc->beacon.cabq == NULL) {
433 ath_print(common, ATH_DBG_FATAL,
434 "Unable to setup CAB xmit queue\n");
435 goto err;
436 }
437
438 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
439 ath_cabq_update(sc);
440
441 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
442 ath_print(common, ATH_DBG_FATAL,
443 "Unable to setup xmit queue for BK traffic\n");
444 goto err;
445 }
446
447 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
448 ath_print(common, ATH_DBG_FATAL,
449 "Unable to setup xmit queue for BE traffic\n");
450 goto err;
451 }
452 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
453 ath_print(common, ATH_DBG_FATAL,
454 "Unable to setup xmit queue for VI traffic\n");
455 goto err;
456 }
457 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
458 ath_print(common, ATH_DBG_FATAL,
459 "Unable to setup xmit queue for VO traffic\n");
460 goto err;
461 }
462
463 return 0;
464
465err:
466 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
467 if (ATH_TXQ_SETUP(sc, i))
468 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
469
470 return -EIO;
471}
472
473static void ath9k_init_channels_rates(struct ath_softc *sc)
474{
475 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
476 sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
477 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
478 sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
479 ARRAY_SIZE(ath9k_2ghz_chantable);
480 sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
481 sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
482 ARRAY_SIZE(ath9k_legacy_rates);
483 }
484
485 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
486 sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
487 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
488 sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
489 ARRAY_SIZE(ath9k_5ghz_chantable);
490 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
491 ath9k_legacy_rates + 4;
492 sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
493 ARRAY_SIZE(ath9k_legacy_rates) - 4;
494 }
495}
496
497static void ath9k_init_misc(struct ath_softc *sc)
498{
499 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
500 int i = 0;
501
502 common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
503 setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
504
505 sc->config.txpowlimit = ATH_TXPOWER_MAX;
506
507 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
508 sc->sc_flags |= SC_OP_TXAGGR;
509 sc->sc_flags |= SC_OP_RXAGGR;
510 }
511
512 common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
513 common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
514
515 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
516 sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
517
518 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
519 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
520
521 sc->beacon.slottime = ATH9K_SLOT_TIME_9;
522
523 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
524 sc->beacon.bslot[i] = NULL;
525 sc->beacon.bslot_aphy[i] = NULL;
526 }
527}
528
/*
 * Allocate and initialize the per-device software state: the ath_hw
 * instance, locks, tasklets, hardware init, debugfs, TX queues, BT
 * coex, crypto, channel/rate tables and misc defaults.
 *
 * Returns 0 on success or a negative errno; on failure everything set
 * up so far is unwound (in reverse order via the err_* labels) and
 * sc->sc_ah is left NULL.
 */
static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->hw_version.devid = devid;
	ah->hw_version.subsysid = subsysid;
	sc->sc_ah = ah;

	/* Wire up the shared ath layer before anything touches registers:
	 * the register accessors resolve sc through common->priv. */
	common = ath9k_hw_common(ah);
	common->ops = &ath9k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;

	spin_lock_init(&sc->wiphy_lock);
	spin_lock_init(&sc->sc_resetlock);
	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	ret = ath9k_hw_init(ah);
	if (ret) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to initialize hardware; "
			  "initialization status: %d\n", ret);
		goto err_hw;
	}

	ret = ath9k_init_debug(ah);
	if (ret) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to create debugfs files\n");
		goto err_debug;
	}

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret = ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	/* These three cannot fail. */
	ath9k_init_crypto(sc);
	ath9k_init_channels_rates(sc);
	ath9k_init_misc(sc);

	return 0;

err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_exit_debug(ah);
err_debug:
	ath9k_hw_deinit(ah);
err_hw:
	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	kfree(ah);
	sc->sc_ah = NULL;

	return ret;
}
615
616void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
617{
618 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
619
620 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
621 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
622 IEEE80211_HW_SIGNAL_DBM |
623 IEEE80211_HW_SUPPORTS_PS |
624 IEEE80211_HW_PS_NULLFUNC_STACK |
625 IEEE80211_HW_SPECTRUM_MGMT |
626 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
627
628 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
629 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
630
631 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
632 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
633
634 hw->wiphy->interface_modes =
635 BIT(NL80211_IFTYPE_AP) |
636 BIT(NL80211_IFTYPE_STATION) |
637 BIT(NL80211_IFTYPE_ADHOC) |
638 BIT(NL80211_IFTYPE_MESH_POINT);
639
640 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
641
642 hw->queues = 4;
643 hw->max_rates = 4;
644 hw->channel_change_time = 5000;
645 hw->max_listen_interval = 10;
646 hw->max_rate_tries = 10;
647 hw->sta_data_size = sizeof(struct ath_node);
648 hw->vif_data_size = sizeof(struct ath_vif);
649
650 hw->rate_control_algorithm = "ath9k_rate_control";
651
652 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
653 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
654 &sc->sbands[IEEE80211_BAND_2GHZ];
655 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
656 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
657 &sc->sbands[IEEE80211_BAND_5GHZ];
658
659 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
660 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
661 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
662 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
663 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
664 }
665
666 SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
667}
668
/*
 * Bus-independent device bring-up, called by the PCI and AHB probe
 * paths: initialize the softc, advertise capabilities, set up
 * regulatory, TX/RX DMA, register with mac80211, and start LEDs and
 * rfkill polling.
 *
 * Returns 0 on success or a negative errno; on failure everything is
 * unwound in reverse order via the error_* labels.
 */
int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
		    const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	struct ath_hw *ah;
	int error = 0;
	struct ath_regulatory *reg;

	/* Bring up device */
	error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
	if (error != 0)
		goto error_init;

	ah = sc->sc_ah;
	common = ath9k_hw_common(ah);
	ath9k_set_hw_capab(sc, hw);

	/* Initialize regulatory */
	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
			      ath9k_reg_notifier);
	if (error)
		goto error_regd;

	reg = &common->regulatory;

	/* Setup TX DMA */
	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto error_tx;

	/* Setup RX DMA */
	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto error_rx;

	/* Register with mac80211 */
	error = ieee80211_register_hw(hw);
	if (error)
		goto error_register;

	/* Handle world regulatory */
	if (!ath_is_world_regd(reg)) {
		error = regulatory_hint(hw->wiphy, reg->alpha2);
		if (error)
			goto error_world;
	}

	INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
	INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
	sc->wiphy_scheduler_int = msecs_to_jiffies(500);

	ath_init_leds(sc);
	ath_start_rfkill_poll(sc);

	return 0;

error_world:
	ieee80211_unregister_hw(hw);
error_register:
	ath_rx_cleanup(sc);
error_rx:
	ath_tx_cleanup(sc);
error_tx:
	/* Nothing */
error_regd:
	ath9k_deinit_softc(sc);
error_init:
	return error;
}
739
740/*****************************/
741/* De-Initialization */
742/*****************************/
743
744static void ath9k_deinit_softc(struct ath_softc *sc)
745{
746 int i = 0;
747
748 if ((sc->btcoex.no_stomp_timer) &&
749 sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
750 ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
751
752 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
753 if (ATH_TXQ_SETUP(sc, i))
754 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
755
756 ath9k_exit_debug(sc->sc_ah);
757 ath9k_hw_deinit(sc->sc_ah);
758
759 tasklet_kill(&sc->intr_tq);
760 tasklet_kill(&sc->bcon_tasklet);
761}
762
763void ath9k_deinit_device(struct ath_softc *sc)
764{
765 struct ieee80211_hw *hw = sc->hw;
766 int i = 0;
767
768 ath9k_ps_wakeup(sc);
769
770 wiphy_rfkill_stop_polling(sc->hw->wiphy);
771 ath_deinit_leds(sc);
772
773 for (i = 0; i < sc->num_sec_wiphy; i++) {
774 struct ath_wiphy *aphy = sc->sec_wiphy[i];
775 if (aphy == NULL)
776 continue;
777 sc->sec_wiphy[i] = NULL;
778 ieee80211_unregister_hw(aphy->hw);
779 ieee80211_free_hw(aphy->hw);
780 }
781 kfree(sc->sec_wiphy);
782
783 ieee80211_unregister_hw(hw);
784 ath_rx_cleanup(sc);
785 ath_tx_cleanup(sc);
786 ath9k_deinit_softc(sc);
787}
788
/*
 * Counterpart of ath_descdma_setup(): free the coherent descriptor
 * memory and the ath_buf array, empty @head, and zero @dd so the
 * descriptor set can be reused or safely re-freed.
 */
void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}
800
801/************************/
802/* Module Hooks */
803/************************/
804
805static int __init ath9k_init(void)
806{
807 int error;
808
809 /* Register rate control algorithm */
810 error = ath_rate_control_register();
811 if (error != 0) {
812 printk(KERN_ERR
813 "ath9k: Unable to register rate control "
814 "algorithm: %d\n",
815 error);
816 goto err_out;
817 }
818
819 error = ath9k_debug_create_root();
820 if (error) {
821 printk(KERN_ERR
822 "ath9k: Unable to create debugfs root: %d\n",
823 error);
824 goto err_rate_unregister;
825 }
826
827 error = ath_pci_init();
828 if (error < 0) {
829 printk(KERN_ERR
830 "ath9k: No PCI devices found, driver not installed.\n");
831 error = -ENODEV;
832 goto err_remove_root;
833 }
834
835 error = ath_ahb_init();
836 if (error < 0) {
837 error = -ENODEV;
838 goto err_pci_exit;
839 }
840
841 return 0;
842
843 err_pci_exit:
844 ath_pci_exit();
845
846 err_remove_root:
847 ath9k_debug_remove_root();
848 err_rate_unregister:
849 ath_rate_control_unregister();
850 err_out:
851 return error;
852}
853module_init(ath9k_init);
854
/*
 * Module exit point: tear down in the reverse order of ath9k_init() —
 * AHB, PCI, debugfs root, then the rate control algorithm.
 */
static void __exit ath9k_exit(void)
{
	ath_ahb_exit();
	ath_pci_exit();
	ath9k_debug_remove_root();
	ath_rate_control_unregister();
	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index e185479e295e..29851e6376a9 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -167,6 +167,40 @@ struct ath_rx_status {
167#define ATH9K_RXKEYIX_INVALID ((u8)-1) 167#define ATH9K_RXKEYIX_INVALID ((u8)-1)
168#define ATH9K_TXKEYIX_INVALID ((u32)-1) 168#define ATH9K_TXKEYIX_INVALID ((u32)-1)
169 169
/* PHY error codes — presumably the values the hardware reports in the
 * RX status for frames received with PHY-level errors; TODO confirm
 * against the rx descriptor handling. Values are fixed by hardware,
 * hence the explicit assignments and the gaps. */
enum ath9k_phyerr {
	ATH9K_PHYERR_UNDERRUN = 0, /* Transmit underrun */
	ATH9K_PHYERR_TIMING = 1, /* Timing error */
	ATH9K_PHYERR_PARITY = 2, /* Illegal parity */
	ATH9K_PHYERR_RATE = 3, /* Illegal rate */
	ATH9K_PHYERR_LENGTH = 4, /* Illegal length */
	ATH9K_PHYERR_RADAR = 5, /* Radar detect */
	ATH9K_PHYERR_SERVICE = 6, /* Illegal service */
	ATH9K_PHYERR_TOR = 7, /* Transmit override receive */

	/* OFDM-specific error codes */
	ATH9K_PHYERR_OFDM_TIMING = 17,
	ATH9K_PHYERR_OFDM_SIGNAL_PARITY = 18,
	ATH9K_PHYERR_OFDM_RATE_ILLEGAL = 19,
	ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL = 20,
	ATH9K_PHYERR_OFDM_POWER_DROP = 21,
	ATH9K_PHYERR_OFDM_SERVICE = 22,
	ATH9K_PHYERR_OFDM_RESTART = 23,
	ATH9K_PHYERR_FALSE_RADAR_EXT = 24,

	/* CCK-specific error codes */
	ATH9K_PHYERR_CCK_TIMING = 25,
	ATH9K_PHYERR_CCK_HEADER_CRC = 26,
	ATH9K_PHYERR_CCK_RATE_ILLEGAL = 27,
	ATH9K_PHYERR_CCK_SERVICE = 30,
	ATH9K_PHYERR_CCK_RESTART = 31,
	ATH9K_PHYERR_CCK_LENGTH_ILLEGAL = 32,
	ATH9K_PHYERR_CCK_POWER_DROP = 33,

	/* HT-specific error codes */
	ATH9K_PHYERR_HT_CRC_ERROR = 34,
	ATH9K_PHYERR_HT_LENGTH_ILLEGAL = 35,
	ATH9K_PHYERR_HT_RATE_ILLEGAL = 36,

	ATH9K_PHYERR_MAX = 37,
};
203
170struct ath_desc { 204struct ath_desc {
171 u32 ds_link; 205 u32 ds_link;
172 u32 ds_data; 206 u32 ds_data;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 643bea35686f..67ca4e5a6017 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -18,118 +18,6 @@
18#include "ath9k.h" 18#include "ath9k.h"
19#include "btcoex.h" 19#include "btcoex.h"
20 20
21static char *dev_info = "ath9k";
22
23MODULE_AUTHOR("Atheros Communications");
24MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
25MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
26MODULE_LICENSE("Dual BSD/GPL");
27
28static int modparam_nohwcrypt;
29module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
30MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
31
32static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
33module_param_named(debug, ath9k_debug, uint, 0);
34MODULE_PARM_DESC(debug, "Debugging mask");
35
36/* We use the hw_value as an index into our private channel structure */
37
38#define CHAN2G(_freq, _idx) { \
39 .center_freq = (_freq), \
40 .hw_value = (_idx), \
41 .max_power = 20, \
42}
43
44#define CHAN5G(_freq, _idx) { \
45 .band = IEEE80211_BAND_5GHZ, \
46 .center_freq = (_freq), \
47 .hw_value = (_idx), \
48 .max_power = 20, \
49}
50
51/* Some 2 GHz radios are actually tunable on 2312-2732
52 * on 5 MHz steps, we support the channels which we know
53 * we have calibration data for all cards though to make
54 * this static */
55static struct ieee80211_channel ath9k_2ghz_chantable[] = {
56 CHAN2G(2412, 0), /* Channel 1 */
57 CHAN2G(2417, 1), /* Channel 2 */
58 CHAN2G(2422, 2), /* Channel 3 */
59 CHAN2G(2427, 3), /* Channel 4 */
60 CHAN2G(2432, 4), /* Channel 5 */
61 CHAN2G(2437, 5), /* Channel 6 */
62 CHAN2G(2442, 6), /* Channel 7 */
63 CHAN2G(2447, 7), /* Channel 8 */
64 CHAN2G(2452, 8), /* Channel 9 */
65 CHAN2G(2457, 9), /* Channel 10 */
66 CHAN2G(2462, 10), /* Channel 11 */
67 CHAN2G(2467, 11), /* Channel 12 */
68 CHAN2G(2472, 12), /* Channel 13 */
69 CHAN2G(2484, 13), /* Channel 14 */
70};
71
72/* Some 5 GHz radios are actually tunable on XXXX-YYYY
73 * on 5 MHz steps, we support the channels which we know
74 * we have calibration data for all cards though to make
75 * this static */
76static struct ieee80211_channel ath9k_5ghz_chantable[] = {
77 /* _We_ call this UNII 1 */
78 CHAN5G(5180, 14), /* Channel 36 */
79 CHAN5G(5200, 15), /* Channel 40 */
80 CHAN5G(5220, 16), /* Channel 44 */
81 CHAN5G(5240, 17), /* Channel 48 */
82 /* _We_ call this UNII 2 */
83 CHAN5G(5260, 18), /* Channel 52 */
84 CHAN5G(5280, 19), /* Channel 56 */
85 CHAN5G(5300, 20), /* Channel 60 */
86 CHAN5G(5320, 21), /* Channel 64 */
87 /* _We_ call this "Middle band" */
88 CHAN5G(5500, 22), /* Channel 100 */
89 CHAN5G(5520, 23), /* Channel 104 */
90 CHAN5G(5540, 24), /* Channel 108 */
91 CHAN5G(5560, 25), /* Channel 112 */
92 CHAN5G(5580, 26), /* Channel 116 */
93 CHAN5G(5600, 27), /* Channel 120 */
94 CHAN5G(5620, 28), /* Channel 124 */
95 CHAN5G(5640, 29), /* Channel 128 */
96 CHAN5G(5660, 30), /* Channel 132 */
97 CHAN5G(5680, 31), /* Channel 136 */
98 CHAN5G(5700, 32), /* Channel 140 */
99 /* _We_ call this UNII 3 */
100 CHAN5G(5745, 33), /* Channel 149 */
101 CHAN5G(5765, 34), /* Channel 153 */
102 CHAN5G(5785, 35), /* Channel 157 */
103 CHAN5G(5805, 36), /* Channel 161 */
104 CHAN5G(5825, 37), /* Channel 165 */
105};
106
107/* Atheros hardware rate code addition for short premble */
108#define SHPCHECK(__hw_rate, __flags) \
109 ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
110
111#define RATE(_bitrate, _hw_rate, _flags) { \
112 .bitrate = (_bitrate), \
113 .flags = (_flags), \
114 .hw_value = (_hw_rate), \
115 .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
116}
117
118static struct ieee80211_rate ath9k_legacy_rates[] = {
119 RATE(10, 0x1b, 0),
120 RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
121 RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
122 RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
123 RATE(60, 0x0b, 0),
124 RATE(90, 0x0f, 0),
125 RATE(120, 0x0a, 0),
126 RATE(180, 0x0e, 0),
127 RATE(240, 0x09, 0),
128 RATE(360, 0x0d, 0),
129 RATE(480, 0x08, 0),
130 RATE(540, 0x0c, 0),
131};
132
133static void ath_cache_conf_rate(struct ath_softc *sc, 21static void ath_cache_conf_rate(struct ath_softc *sc,
134 struct ieee80211_conf *conf) 22 struct ieee80211_conf *conf)
135{ 23{
@@ -221,7 +109,7 @@ static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
221 return channel; 109 return channel;
222} 110}
223 111
224static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode) 112bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
225{ 113{
226 unsigned long flags; 114 unsigned long flags;
227 bool ret; 115 bool ret;
@@ -255,11 +143,13 @@ void ath9k_ps_restore(struct ath_softc *sc)
255 if (--sc->ps_usecount != 0) 143 if (--sc->ps_usecount != 0)
256 goto unlock; 144 goto unlock;
257 145
258 if (sc->ps_enabled && 146 if (sc->ps_idle)
259 !(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | 147 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
260 SC_OP_WAIT_FOR_CAB | 148 else if (sc->ps_enabled &&
261 SC_OP_WAIT_FOR_PSPOLL_DATA | 149 !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
262 SC_OP_WAIT_FOR_TX_ACK))) 150 PS_WAIT_FOR_CAB |
151 PS_WAIT_FOR_PSPOLL_DATA |
152 PS_WAIT_FOR_TX_ACK)))
263 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP); 153 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
264 154
265 unlock: 155 unlock:
@@ -316,7 +206,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
316 r = ath9k_hw_reset(ah, hchan, fastcc); 206 r = ath9k_hw_reset(ah, hchan, fastcc);
317 if (r) { 207 if (r) {
318 ath_print(common, ATH_DBG_FATAL, 208 ath_print(common, ATH_DBG_FATAL,
319 "Unable to reset channel (%u Mhz) " 209 "Unable to reset channel (%u MHz), "
320 "reset status %d\n", 210 "reset status %d\n",
321 channel->center_freq, r); 211 channel->center_freq, r);
322 spin_unlock_bh(&sc->sc_resetlock); 212 spin_unlock_bh(&sc->sc_resetlock);
@@ -349,7 +239,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
349 * When the task is complete, it reschedules itself depending on the 239 * When the task is complete, it reschedules itself depending on the
350 * appropriate interval that was calculated. 240 * appropriate interval that was calculated.
351 */ 241 */
352static void ath_ani_calibrate(unsigned long data) 242void ath_ani_calibrate(unsigned long data)
353{ 243{
354 struct ath_softc *sc = (struct ath_softc *)data; 244 struct ath_softc *sc = (struct ath_softc *)data;
355 struct ath_hw *ah = sc->sc_ah; 245 struct ath_hw *ah = sc->sc_ah;
@@ -363,14 +253,6 @@ static void ath_ani_calibrate(unsigned long data)
363 short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ? 253 short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
364 ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL; 254 ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
365 255
366 /*
367 * don't calibrate when we're scanning.
368 * we are most likely not on our home channel.
369 */
370 spin_lock(&sc->ani_lock);
371 if (sc->sc_flags & SC_OP_SCANNING)
372 goto set_timer;
373
374 /* Only calibrate if awake */ 256 /* Only calibrate if awake */
375 if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE) 257 if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
376 goto set_timer; 258 goto set_timer;
@@ -437,7 +319,6 @@ static void ath_ani_calibrate(unsigned long data)
437 ath9k_ps_restore(sc); 319 ath9k_ps_restore(sc);
438 320
439set_timer: 321set_timer:
440 spin_unlock(&sc->ani_lock);
441 /* 322 /*
442 * Set timer interval based on previous results. 323 * Set timer interval based on previous results.
443 * The interval must be the shortest necessary to satisfy ANI, 324 * The interval must be the shortest necessary to satisfy ANI,
@@ -513,7 +394,7 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
513 ath_tx_node_cleanup(sc, an); 394 ath_tx_node_cleanup(sc, an);
514} 395}
515 396
516static void ath9k_tasklet(unsigned long data) 397void ath9k_tasklet(unsigned long data)
517{ 398{
518 struct ath_softc *sc = (struct ath_softc *)data; 399 struct ath_softc *sc = (struct ath_softc *)data;
519 struct ath_hw *ah = sc->sc_ah; 400 struct ath_hw *ah = sc->sc_ah;
@@ -545,7 +426,7 @@ static void ath9k_tasklet(unsigned long data)
545 */ 426 */
546 ath_print(common, ATH_DBG_PS, 427 ath_print(common, ATH_DBG_PS,
547 "TSFOOR - Sync with next Beacon\n"); 428 "TSFOOR - Sync with next Beacon\n");
548 sc->sc_flags |= SC_OP_WAIT_FOR_BEACON | SC_OP_BEACON_SYNC; 429 sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
549 } 430 }
550 431
551 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) 432 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
@@ -646,7 +527,7 @@ irqreturn_t ath_isr(int irq, void *dev)
646 * receive frames */ 527 * receive frames */
647 ath9k_setpower(sc, ATH9K_PM_AWAKE); 528 ath9k_setpower(sc, ATH9K_PM_AWAKE);
648 ath9k_hw_setrxabort(sc->sc_ah, 0); 529 ath9k_hw_setrxabort(sc->sc_ah, 0);
649 sc->sc_flags |= SC_OP_WAIT_FOR_BEACON; 530 sc->ps_flags |= PS_WAIT_FOR_BEACON;
650 } 531 }
651 532
652chip_reset: 533chip_reset:
@@ -928,49 +809,12 @@ static void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf
928 809
929 clear_bit(key->hw_key_idx + 64, common->keymap); 810 clear_bit(key->hw_key_idx + 64, common->keymap);
930 if (common->splitmic) { 811 if (common->splitmic) {
812 ath9k_hw_keyreset(ah, key->hw_key_idx + 32);
931 clear_bit(key->hw_key_idx + 32, common->keymap); 813 clear_bit(key->hw_key_idx + 32, common->keymap);
932 clear_bit(key->hw_key_idx + 64 + 32, common->keymap); 814 clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
933 } 815 }
934} 816}
935 817
936static void setup_ht_cap(struct ath_softc *sc,
937 struct ieee80211_sta_ht_cap *ht_info)
938{
939 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
940 u8 tx_streams, rx_streams;
941
942 ht_info->ht_supported = true;
943 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
944 IEEE80211_HT_CAP_SM_PS |
945 IEEE80211_HT_CAP_SGI_40 |
946 IEEE80211_HT_CAP_DSSSCCK40;
947
948 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
949 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
950
951 /* set up supported mcs set */
952 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
953 tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
954 1 : 2;
955 rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
956 1 : 2;
957
958 if (tx_streams != rx_streams) {
959 ath_print(common, ATH_DBG_CONFIG,
960 "TX streams %d, RX streams: %d\n",
961 tx_streams, rx_streams);
962 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
963 ht_info->mcs.tx_params |= ((tx_streams - 1) <<
964 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
965 }
966
967 ht_info->mcs.rx_mask[0] = 0xff;
968 if (rx_streams >= 2)
969 ht_info->mcs.rx_mask[1] = 0xff;
970
971 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
972}
973
974static void ath9k_bss_assoc_info(struct ath_softc *sc, 818static void ath9k_bss_assoc_info(struct ath_softc *sc,
975 struct ieee80211_vif *vif, 819 struct ieee80211_vif *vif,
976 struct ieee80211_bss_conf *bss_conf) 820 struct ieee80211_bss_conf *bss_conf)
@@ -992,7 +836,7 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
992 * on the receipt of the first Beacon frame (i.e., 836 * on the receipt of the first Beacon frame (i.e.,
993 * after time sync with the AP). 837 * after time sync with the AP).
994 */ 838 */
995 sc->sc_flags |= SC_OP_BEACON_SYNC; 839 sc->ps_flags |= PS_BEACON_SYNC;
996 840
997 /* Configure the beacon */ 841 /* Configure the beacon */
998 ath_beacon_config(sc, vif); 842 ath_beacon_config(sc, vif);
@@ -1009,174 +853,6 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
1009 } 853 }
1010} 854}
1011 855
1012/********************************/
1013/* LED functions */
1014/********************************/
1015
1016static void ath_led_blink_work(struct work_struct *work)
1017{
1018 struct ath_softc *sc = container_of(work, struct ath_softc,
1019 ath_led_blink_work.work);
1020
1021 if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
1022 return;
1023
1024 if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
1025 (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
1026 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
1027 else
1028 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
1029 (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
1030
1031 ieee80211_queue_delayed_work(sc->hw,
1032 &sc->ath_led_blink_work,
1033 (sc->sc_flags & SC_OP_LED_ON) ?
1034 msecs_to_jiffies(sc->led_off_duration) :
1035 msecs_to_jiffies(sc->led_on_duration));
1036
1037 sc->led_on_duration = sc->led_on_cnt ?
1038 max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
1039 ATH_LED_ON_DURATION_IDLE;
1040 sc->led_off_duration = sc->led_off_cnt ?
1041 max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
1042 ATH_LED_OFF_DURATION_IDLE;
1043 sc->led_on_cnt = sc->led_off_cnt = 0;
1044 if (sc->sc_flags & SC_OP_LED_ON)
1045 sc->sc_flags &= ~SC_OP_LED_ON;
1046 else
1047 sc->sc_flags |= SC_OP_LED_ON;
1048}
1049
1050static void ath_led_brightness(struct led_classdev *led_cdev,
1051 enum led_brightness brightness)
1052{
1053 struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
1054 struct ath_softc *sc = led->sc;
1055
1056 switch (brightness) {
1057 case LED_OFF:
1058 if (led->led_type == ATH_LED_ASSOC ||
1059 led->led_type == ATH_LED_RADIO) {
1060 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
1061 (led->led_type == ATH_LED_RADIO));
1062 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
1063 if (led->led_type == ATH_LED_RADIO)
1064 sc->sc_flags &= ~SC_OP_LED_ON;
1065 } else {
1066 sc->led_off_cnt++;
1067 }
1068 break;
1069 case LED_FULL:
1070 if (led->led_type == ATH_LED_ASSOC) {
1071 sc->sc_flags |= SC_OP_LED_ASSOCIATED;
1072 ieee80211_queue_delayed_work(sc->hw,
1073 &sc->ath_led_blink_work, 0);
1074 } else if (led->led_type == ATH_LED_RADIO) {
1075 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
1076 sc->sc_flags |= SC_OP_LED_ON;
1077 } else {
1078 sc->led_on_cnt++;
1079 }
1080 break;
1081 default:
1082 break;
1083 }
1084}
1085
1086static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
1087 char *trigger)
1088{
1089 int ret;
1090
1091 led->sc = sc;
1092 led->led_cdev.name = led->name;
1093 led->led_cdev.default_trigger = trigger;
1094 led->led_cdev.brightness_set = ath_led_brightness;
1095
1096 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
1097 if (ret)
1098 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1099 "Failed to register led:%s", led->name);
1100 else
1101 led->registered = 1;
1102 return ret;
1103}
1104
1105static void ath_unregister_led(struct ath_led *led)
1106{
1107 if (led->registered) {
1108 led_classdev_unregister(&led->led_cdev);
1109 led->registered = 0;
1110 }
1111}
1112
1113static void ath_deinit_leds(struct ath_softc *sc)
1114{
1115 ath_unregister_led(&sc->assoc_led);
1116 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
1117 ath_unregister_led(&sc->tx_led);
1118 ath_unregister_led(&sc->rx_led);
1119 ath_unregister_led(&sc->radio_led);
1120 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
1121}
1122
1123static void ath_init_leds(struct ath_softc *sc)
1124{
1125 char *trigger;
1126 int ret;
1127
1128 if (AR_SREV_9287(sc->sc_ah))
1129 sc->sc_ah->led_pin = ATH_LED_PIN_9287;
1130 else
1131 sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
1132
1133 /* Configure gpio 1 for output */
1134 ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
1135 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1136 /* LED off, active low */
1137 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
1138
1139 INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
1140
1141 trigger = ieee80211_get_radio_led_name(sc->hw);
1142 snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
1143 "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
1144 ret = ath_register_led(sc, &sc->radio_led, trigger);
1145 sc->radio_led.led_type = ATH_LED_RADIO;
1146 if (ret)
1147 goto fail;
1148
1149 trigger = ieee80211_get_assoc_led_name(sc->hw);
1150 snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
1151 "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
1152 ret = ath_register_led(sc, &sc->assoc_led, trigger);
1153 sc->assoc_led.led_type = ATH_LED_ASSOC;
1154 if (ret)
1155 goto fail;
1156
1157 trigger = ieee80211_get_tx_led_name(sc->hw);
1158 snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
1159 "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
1160 ret = ath_register_led(sc, &sc->tx_led, trigger);
1161 sc->tx_led.led_type = ATH_LED_TX;
1162 if (ret)
1163 goto fail;
1164
1165 trigger = ieee80211_get_rx_led_name(sc->hw);
1166 snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
1167 "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
1168 ret = ath_register_led(sc, &sc->rx_led, trigger);
1169 sc->rx_led.led_type = ATH_LED_RX;
1170 if (ret)
1171 goto fail;
1172
1173 return;
1174
1175fail:
1176 cancel_delayed_work_sync(&sc->ath_led_blink_work);
1177 ath_deinit_leds(sc);
1178}
1179
1180void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw) 856void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
1181{ 857{
1182 struct ath_hw *ah = sc->sc_ah; 858 struct ath_hw *ah = sc->sc_ah;
@@ -1194,7 +870,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
1194 r = ath9k_hw_reset(ah, ah->curchan, false); 870 r = ath9k_hw_reset(ah, ah->curchan, false);
1195 if (r) { 871 if (r) {
1196 ath_print(common, ATH_DBG_FATAL, 872 ath_print(common, ATH_DBG_FATAL,
1197 "Unable to reset channel %u (%uMhz) ", 873 "Unable to reset channel (%u MHz), "
1198 "reset status %d\n", 874 "reset status %d\n",
1199 channel->center_freq, r); 875 channel->center_freq, r);
1200 } 876 }
@@ -1249,7 +925,7 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
1249 r = ath9k_hw_reset(ah, ah->curchan, false); 925 r = ath9k_hw_reset(ah, ah->curchan, false);
1250 if (r) { 926 if (r) {
1251 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, 927 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1252 "Unable to reset channel %u (%uMhz) " 928 "Unable to reset channel (%u MHz), "
1253 "reset status %d\n", 929 "reset status %d\n",
1254 channel->center_freq, r); 930 channel->center_freq, r);
1255 } 931 }
@@ -1261,711 +937,6 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
1261 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP); 937 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
1262} 938}
1263 939
1264/*******************/
1265/* Rfkill */
1266/*******************/
1267
1268static bool ath_is_rfkill_set(struct ath_softc *sc)
1269{
1270 struct ath_hw *ah = sc->sc_ah;
1271
1272 return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
1273 ah->rfkill_polarity;
1274}
1275
1276static void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
1277{
1278 struct ath_wiphy *aphy = hw->priv;
1279 struct ath_softc *sc = aphy->sc;
1280 bool blocked = !!ath_is_rfkill_set(sc);
1281
1282 wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
1283}
1284
1285static void ath_start_rfkill_poll(struct ath_softc *sc)
1286{
1287 struct ath_hw *ah = sc->sc_ah;
1288
1289 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1290 wiphy_rfkill_start_polling(sc->hw->wiphy);
1291}
1292
1293static void ath9k_uninit_hw(struct ath_softc *sc)
1294{
1295 struct ath_hw *ah = sc->sc_ah;
1296
1297 BUG_ON(!ah);
1298
1299 ath9k_exit_debug(ah);
1300 ath9k_hw_detach(ah);
1301 sc->sc_ah = NULL;
1302}
1303
1304static void ath_clean_core(struct ath_softc *sc)
1305{
1306 struct ieee80211_hw *hw = sc->hw;
1307 struct ath_hw *ah = sc->sc_ah;
1308 int i = 0;
1309
1310 ath9k_ps_wakeup(sc);
1311
1312 dev_dbg(sc->dev, "Detach ATH hw\n");
1313
1314 ath_deinit_leds(sc);
1315 wiphy_rfkill_stop_polling(sc->hw->wiphy);
1316
1317 for (i = 0; i < sc->num_sec_wiphy; i++) {
1318 struct ath_wiphy *aphy = sc->sec_wiphy[i];
1319 if (aphy == NULL)
1320 continue;
1321 sc->sec_wiphy[i] = NULL;
1322 ieee80211_unregister_hw(aphy->hw);
1323 ieee80211_free_hw(aphy->hw);
1324 }
1325 ieee80211_unregister_hw(hw);
1326 ath_rx_cleanup(sc);
1327 ath_tx_cleanup(sc);
1328
1329 tasklet_kill(&sc->intr_tq);
1330 tasklet_kill(&sc->bcon_tasklet);
1331
1332 if (!(sc->sc_flags & SC_OP_INVALID))
1333 ath9k_setpower(sc, ATH9K_PM_AWAKE);
1334
1335 /* cleanup tx queues */
1336 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1337 if (ATH_TXQ_SETUP(sc, i))
1338 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1339
1340 if ((sc->btcoex.no_stomp_timer) &&
1341 ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
1342 ath_gen_timer_free(ah, sc->btcoex.no_stomp_timer);
1343}
1344
1345void ath_detach(struct ath_softc *sc)
1346{
1347 ath_clean_core(sc);
1348 ath9k_uninit_hw(sc);
1349}
1350
1351void ath_cleanup(struct ath_softc *sc)
1352{
1353 struct ath_hw *ah = sc->sc_ah;
1354 struct ath_common *common = ath9k_hw_common(ah);
1355
1356 ath_clean_core(sc);
1357 free_irq(sc->irq, sc);
1358 ath_bus_cleanup(common);
1359 kfree(sc->sec_wiphy);
1360 ieee80211_free_hw(sc->hw);
1361
1362 ath9k_uninit_hw(sc);
1363}
1364
1365static int ath9k_reg_notifier(struct wiphy *wiphy,
1366 struct regulatory_request *request)
1367{
1368 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1369 struct ath_wiphy *aphy = hw->priv;
1370 struct ath_softc *sc = aphy->sc;
1371 struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
1372
1373 return ath_reg_notifier_apply(wiphy, request, reg);
1374}
1375
1376/*
1377 * Detects if there is any priority bt traffic
1378 */
1379static void ath_detect_bt_priority(struct ath_softc *sc)
1380{
1381 struct ath_btcoex *btcoex = &sc->btcoex;
1382 struct ath_hw *ah = sc->sc_ah;
1383
1384 if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
1385 btcoex->bt_priority_cnt++;
1386
1387 if (time_after(jiffies, btcoex->bt_priority_time +
1388 msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
1389 if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
1390 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
1391 "BT priority traffic detected");
1392 sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
1393 } else {
1394 sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
1395 }
1396
1397 btcoex->bt_priority_cnt = 0;
1398 btcoex->bt_priority_time = jiffies;
1399 }
1400}
1401
1402/*
1403 * Configures appropriate weight based on stomp type.
1404 */
1405static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
1406 enum ath_stomp_type stomp_type)
1407{
1408 struct ath_hw *ah = sc->sc_ah;
1409
1410 switch (stomp_type) {
1411 case ATH_BTCOEX_STOMP_ALL:
1412 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
1413 AR_STOMP_ALL_WLAN_WGHT);
1414 break;
1415 case ATH_BTCOEX_STOMP_LOW:
1416 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
1417 AR_STOMP_LOW_WLAN_WGHT);
1418 break;
1419 case ATH_BTCOEX_STOMP_NONE:
1420 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
1421 AR_STOMP_NONE_WLAN_WGHT);
1422 break;
1423 default:
1424 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
1425 "Invalid Stomptype\n");
1426 break;
1427 }
1428
1429 ath9k_hw_btcoex_enable(ah);
1430}
1431
1432static void ath9k_gen_timer_start(struct ath_hw *ah,
1433 struct ath_gen_timer *timer,
1434 u32 timer_next,
1435 u32 timer_period)
1436{
1437 struct ath_common *common = ath9k_hw_common(ah);
1438 struct ath_softc *sc = (struct ath_softc *) common->priv;
1439
1440 ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
1441
1442 if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
1443 ath9k_hw_set_interrupts(ah, 0);
1444 sc->imask |= ATH9K_INT_GENTIMER;
1445 ath9k_hw_set_interrupts(ah, sc->imask);
1446 }
1447}
1448
1449static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
1450{
1451 struct ath_common *common = ath9k_hw_common(ah);
1452 struct ath_softc *sc = (struct ath_softc *) common->priv;
1453 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
1454
1455 ath9k_hw_gen_timer_stop(ah, timer);
1456
1457 /* if no timer is enabled, turn off interrupt mask */
1458 if (timer_table->timer_mask.val == 0) {
1459 ath9k_hw_set_interrupts(ah, 0);
1460 sc->imask &= ~ATH9K_INT_GENTIMER;
1461 ath9k_hw_set_interrupts(ah, sc->imask);
1462 }
1463}
1464
1465/*
1466 * This is the master bt coex timer which runs for every
1467 * 45ms, bt traffic will be given priority during 55% of this
1468 * period while wlan gets remaining 45%
1469 */
1470static void ath_btcoex_period_timer(unsigned long data)
1471{
1472 struct ath_softc *sc = (struct ath_softc *) data;
1473 struct ath_hw *ah = sc->sc_ah;
1474 struct ath_btcoex *btcoex = &sc->btcoex;
1475
1476 ath_detect_bt_priority(sc);
1477
1478 spin_lock_bh(&btcoex->btcoex_lock);
1479
1480 ath9k_btcoex_bt_stomp(sc, btcoex->bt_stomp_type);
1481
1482 spin_unlock_bh(&btcoex->btcoex_lock);
1483
1484 if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
1485 if (btcoex->hw_timer_enabled)
1486 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
1487
1488 ath9k_gen_timer_start(ah,
1489 btcoex->no_stomp_timer,
1490 (ath9k_hw_gettsf32(ah) +
1491 btcoex->btcoex_no_stomp),
1492 btcoex->btcoex_no_stomp * 10);
1493 btcoex->hw_timer_enabled = true;
1494 }
1495
1496 mod_timer(&btcoex->period_timer, jiffies +
1497 msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
1498}
1499
1500/*
1501 * Generic tsf based hw timer which configures weight
1502 * registers to time slice between wlan and bt traffic
1503 */
1504static void ath_btcoex_no_stomp_timer(void *arg)
1505{
1506 struct ath_softc *sc = (struct ath_softc *)arg;
1507 struct ath_hw *ah = sc->sc_ah;
1508 struct ath_btcoex *btcoex = &sc->btcoex;
1509
1510 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
1511 "no stomp timer running \n");
1512
1513 spin_lock_bh(&btcoex->btcoex_lock);
1514
1515 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW)
1516 ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE);
1517 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
1518 ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW);
1519
1520 spin_unlock_bh(&btcoex->btcoex_lock);
1521}
1522
1523static int ath_init_btcoex_timer(struct ath_softc *sc)
1524{
1525 struct ath_btcoex *btcoex = &sc->btcoex;
1526
1527 btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
1528 btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
1529 btcoex->btcoex_period / 100;
1530
1531 setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
1532 (unsigned long) sc);
1533
1534 spin_lock_init(&btcoex->btcoex_lock);
1535
1536 btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
1537 ath_btcoex_no_stomp_timer,
1538 ath_btcoex_no_stomp_timer,
1539 (void *) sc, AR_FIRST_NDP_TIMER);
1540
1541 if (!btcoex->no_stomp_timer)
1542 return -ENOMEM;
1543
1544 return 0;
1545}
1546
1547/*
1548 * Read and write, they both share the same lock. We do this to serialize
1549 * reads and writes on Atheros 802.11n PCI devices only. This is required
1550 * as the FIFO on these devices can only accept sanely 2 requests. After
1551 * that the device goes bananas. Serializing the reads/writes prevents this
1552 * from happening.
1553 */
1554
1555static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
1556{
1557 struct ath_hw *ah = (struct ath_hw *) hw_priv;
1558 struct ath_common *common = ath9k_hw_common(ah);
1559 struct ath_softc *sc = (struct ath_softc *) common->priv;
1560
1561 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
1562 unsigned long flags;
1563 spin_lock_irqsave(&sc->sc_serial_rw, flags);
1564 iowrite32(val, sc->mem + reg_offset);
1565 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
1566 } else
1567 iowrite32(val, sc->mem + reg_offset);
1568}
1569
1570static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
1571{
1572 struct ath_hw *ah = (struct ath_hw *) hw_priv;
1573 struct ath_common *common = ath9k_hw_common(ah);
1574 struct ath_softc *sc = (struct ath_softc *) common->priv;
1575 u32 val;
1576
1577 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
1578 unsigned long flags;
1579 spin_lock_irqsave(&sc->sc_serial_rw, flags);
1580 val = ioread32(sc->mem + reg_offset);
1581 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
1582 } else
1583 val = ioread32(sc->mem + reg_offset);
1584 return val;
1585}
1586
1587static const struct ath_ops ath9k_common_ops = {
1588 .read = ath9k_ioread32,
1589 .write = ath9k_iowrite32,
1590};
1591
1592/*
1593 * Initialize and fill ath_softc, ath_sofct is the
1594 * "Software Carrier" struct. Historically it has existed
1595 * to allow the separation between hardware specific
1596 * variables (now in ath_hw) and driver specific variables.
1597 */
1598static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
1599 const struct ath_bus_ops *bus_ops)
1600{
1601 struct ath_hw *ah = NULL;
1602 struct ath_common *common;
1603 int r = 0, i;
1604 int csz = 0;
1605 int qnum;
1606
1607 /* XXX: hardware will not be ready until ath_open() being called */
1608 sc->sc_flags |= SC_OP_INVALID;
1609
1610 spin_lock_init(&sc->wiphy_lock);
1611 spin_lock_init(&sc->sc_resetlock);
1612 spin_lock_init(&sc->sc_serial_rw);
1613 spin_lock_init(&sc->ani_lock);
1614 spin_lock_init(&sc->sc_pm_lock);
1615 mutex_init(&sc->mutex);
1616 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
1617 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
1618 (unsigned long)sc);
1619
1620 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
1621 if (!ah)
1622 return -ENOMEM;
1623
1624 ah->hw_version.devid = devid;
1625 ah->hw_version.subsysid = subsysid;
1626 sc->sc_ah = ah;
1627
1628 common = ath9k_hw_common(ah);
1629 common->ops = &ath9k_common_ops;
1630 common->bus_ops = bus_ops;
1631 common->ah = ah;
1632 common->hw = sc->hw;
1633 common->priv = sc;
1634 common->debug_mask = ath9k_debug;
1635
1636 /*
1637 * Cache line size is used to size and align various
1638 * structures used to communicate with the hardware.
1639 */
1640 ath_read_cachesize(common, &csz);
1641 /* XXX assert csz is non-zero */
1642 common->cachelsz = csz << 2; /* convert to bytes */
1643
1644 r = ath9k_hw_init(ah);
1645 if (r) {
1646 ath_print(common, ATH_DBG_FATAL,
1647 "Unable to initialize hardware; "
1648 "initialization status: %d\n", r);
1649 goto bad_free_hw;
1650 }
1651
1652 if (ath9k_init_debug(ah) < 0) {
1653 ath_print(common, ATH_DBG_FATAL,
1654 "Unable to create debugfs files\n");
1655 goto bad_free_hw;
1656 }
1657
1658 /* Get the hardware key cache size. */
1659 common->keymax = ah->caps.keycache_size;
1660 if (common->keymax > ATH_KEYMAX) {
1661 ath_print(common, ATH_DBG_ANY,
1662 "Warning, using only %u entries in %u key cache\n",
1663 ATH_KEYMAX, common->keymax);
1664 common->keymax = ATH_KEYMAX;
1665 }
1666
1667 /*
1668 * Reset the key cache since some parts do not
1669 * reset the contents on initial power up.
1670 */
1671 for (i = 0; i < common->keymax; i++)
1672 ath9k_hw_keyreset(ah, (u16) i);
1673
1674 /* default to MONITOR mode */
1675 sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
1676
1677 /*
1678 * Allocate hardware transmit queues: one queue for
1679 * beacon frames and one data queue for each QoS
1680 * priority. Note that the hal handles reseting
1681 * these queues at the needed time.
1682 */
1683 sc->beacon.beaconq = ath9k_hw_beaconq_setup(ah);
1684 if (sc->beacon.beaconq == -1) {
1685 ath_print(common, ATH_DBG_FATAL,
1686 "Unable to setup a beacon xmit queue\n");
1687 r = -EIO;
1688 goto bad2;
1689 }
1690 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
1691 if (sc->beacon.cabq == NULL) {
1692 ath_print(common, ATH_DBG_FATAL,
1693 "Unable to setup CAB xmit queue\n");
1694 r = -EIO;
1695 goto bad2;
1696 }
1697
1698 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
1699 ath_cabq_update(sc);
1700
1701 for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
1702 sc->tx.hwq_map[i] = -1;
1703
1704 /* Setup data queues */
1705 /* NB: ensure BK queue is the lowest priority h/w queue */
1706 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
1707 ath_print(common, ATH_DBG_FATAL,
1708 "Unable to setup xmit queue for BK traffic\n");
1709 r = -EIO;
1710 goto bad2;
1711 }
1712
1713 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
1714 ath_print(common, ATH_DBG_FATAL,
1715 "Unable to setup xmit queue for BE traffic\n");
1716 r = -EIO;
1717 goto bad2;
1718 }
1719 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
1720 ath_print(common, ATH_DBG_FATAL,
1721 "Unable to setup xmit queue for VI traffic\n");
1722 r = -EIO;
1723 goto bad2;
1724 }
1725 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
1726 ath_print(common, ATH_DBG_FATAL,
1727 "Unable to setup xmit queue for VO traffic\n");
1728 r = -EIO;
1729 goto bad2;
1730 }
1731
1732 /* Initializes the noise floor to a reasonable default value.
1733 * Later on this will be updated during ANI processing. */
1734
1735 common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
1736 setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
1737
1738 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1739 ATH9K_CIPHER_TKIP, NULL)) {
1740 /*
1741 * Whether we should enable h/w TKIP MIC.
1742 * XXX: if we don't support WME TKIP MIC, then we wouldn't
1743 * report WMM capable, so it's always safe to turn on
1744 * TKIP MIC in this case.
1745 */
1746 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
1747 0, 1, NULL);
1748 }
1749
1750 /*
1751 * Check whether the separate key cache entries
1752 * are required to handle both tx+rx MIC keys.
1753 * With split mic keys the number of stations is limited
1754 * to 27 otherwise 59.
1755 */
1756 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1757 ATH9K_CIPHER_TKIP, NULL)
1758 && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1759 ATH9K_CIPHER_MIC, NULL)
1760 && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
1761 0, NULL))
1762 common->splitmic = 1;
1763
1764 /* turn on mcast key search if possible */
1765 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
1766 (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
1767 1, NULL);
1768
1769 sc->config.txpowlimit = ATH_TXPOWER_MAX;
1770
1771 /* 11n Capabilities */
1772 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1773 sc->sc_flags |= SC_OP_TXAGGR;
1774 sc->sc_flags |= SC_OP_RXAGGR;
1775 }
1776
1777 common->tx_chainmask = ah->caps.tx_chainmask;
1778 common->rx_chainmask = ah->caps.rx_chainmask;
1779
1780 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
1781 sc->rx.defant = ath9k_hw_getdefantenna(ah);
1782
1783 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
1784 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
1785
1786 sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
1787
1788 /* initialize beacon slots */
1789 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
1790 sc->beacon.bslot[i] = NULL;
1791 sc->beacon.bslot_aphy[i] = NULL;
1792 }
1793
1794 /* setup channels and rates */
1795
1796 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
1797 sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
1798 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
1799 sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
1800 ARRAY_SIZE(ath9k_2ghz_chantable);
1801 sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
1802 sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
1803 ARRAY_SIZE(ath9k_legacy_rates);
1804 }
1805
1806 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
1807 sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
1808 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
1809 sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
1810 ARRAY_SIZE(ath9k_5ghz_chantable);
1811 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
1812 ath9k_legacy_rates + 4;
1813 sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
1814 ARRAY_SIZE(ath9k_legacy_rates) - 4;
1815 }
1816
1817 switch (ah->btcoex_hw.scheme) {
1818 case ATH_BTCOEX_CFG_NONE:
1819 break;
1820 case ATH_BTCOEX_CFG_2WIRE:
1821 ath9k_hw_btcoex_init_2wire(ah);
1822 break;
1823 case ATH_BTCOEX_CFG_3WIRE:
1824 ath9k_hw_btcoex_init_3wire(ah);
1825 r = ath_init_btcoex_timer(sc);
1826 if (r)
1827 goto bad2;
1828 qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
1829 ath9k_hw_init_btcoex_hw(ah, qnum);
1830 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
1831 break;
1832 default:
1833 WARN_ON(1);
1834 break;
1835 }
1836
1837 return 0;
1838bad2:
1839 /* cleanup tx queues */
1840 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1841 if (ATH_TXQ_SETUP(sc, i))
1842 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1843
1844bad_free_hw:
1845 ath9k_uninit_hw(sc);
1846 return r;
1847}
1848
1849void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
1850{
1851 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1852 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1853 IEEE80211_HW_SIGNAL_DBM |
1854 IEEE80211_HW_AMPDU_AGGREGATION |
1855 IEEE80211_HW_SUPPORTS_PS |
1856 IEEE80211_HW_PS_NULLFUNC_STACK |
1857 IEEE80211_HW_SPECTRUM_MGMT;
1858
1859 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
1860 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
1861
1862 hw->wiphy->interface_modes =
1863 BIT(NL80211_IFTYPE_AP) |
1864 BIT(NL80211_IFTYPE_STATION) |
1865 BIT(NL80211_IFTYPE_ADHOC) |
1866 BIT(NL80211_IFTYPE_MESH_POINT);
1867
1868 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
1869
1870 hw->queues = 4;
1871 hw->max_rates = 4;
1872 hw->channel_change_time = 5000;
1873 hw->max_listen_interval = 10;
1874 /* Hardware supports 10 but we use 4 */
1875 hw->max_rate_tries = 4;
1876 hw->sta_data_size = sizeof(struct ath_node);
1877 hw->vif_data_size = sizeof(struct ath_vif);
1878
1879 hw->rate_control_algorithm = "ath9k_rate_control";
1880
1881 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
1882 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1883 &sc->sbands[IEEE80211_BAND_2GHZ];
1884 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
1885 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1886 &sc->sbands[IEEE80211_BAND_5GHZ];
1887}
1888
1889/* Device driver core initialization */
1890int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
1891 const struct ath_bus_ops *bus_ops)
1892{
1893 struct ieee80211_hw *hw = sc->hw;
1894 struct ath_common *common;
1895 struct ath_hw *ah;
1896 int error = 0, i;
1897 struct ath_regulatory *reg;
1898
1899 dev_dbg(sc->dev, "Attach ATH hw\n");
1900
1901 error = ath_init_softc(devid, sc, subsysid, bus_ops);
1902 if (error != 0)
1903 return error;
1904
1905 ah = sc->sc_ah;
1906 common = ath9k_hw_common(ah);
1907
1908 /* get mac address from hardware and set in mac80211 */
1909
1910 SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
1911
1912 ath_set_hw_capab(sc, hw);
1913
1914 error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
1915 ath9k_reg_notifier);
1916 if (error)
1917 return error;
1918
1919 reg = &common->regulatory;
1920
1921 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1922 if (test_bit(ATH9K_MODE_11G, ah->caps.wireless_modes))
1923 setup_ht_cap(sc,
1924 &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
1925 if (test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes))
1926 setup_ht_cap(sc,
1927 &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
1928 }
1929
1930 /* initialize tx/rx engine */
1931 error = ath_tx_init(sc, ATH_TXBUF);
1932 if (error != 0)
1933 goto error_attach;
1934
1935 error = ath_rx_init(sc, ATH_RXBUF);
1936 if (error != 0)
1937 goto error_attach;
1938
1939 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
1940 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
1941 sc->wiphy_scheduler_int = msecs_to_jiffies(500);
1942
1943 error = ieee80211_register_hw(hw);
1944
1945 if (!ath_is_world_regd(reg)) {
1946 error = regulatory_hint(hw->wiphy, reg->alpha2);
1947 if (error)
1948 goto error_attach;
1949 }
1950
1951 /* Initialize LED control */
1952 ath_init_leds(sc);
1953
1954 ath_start_rfkill_poll(sc);
1955
1956 return 0;
1957
1958error_attach:
1959 /* cleanup tx queues */
1960 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1961 if (ATH_TXQ_SETUP(sc, i))
1962 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1963
1964 ath9k_uninit_hw(sc);
1965
1966 return error;
1967}
1968
1969int ath_reset(struct ath_softc *sc, bool retry_tx) 940int ath_reset(struct ath_softc *sc, bool retry_tx)
1970{ 941{
1971 struct ath_hw *ah = sc->sc_ah; 942 struct ath_hw *ah = sc->sc_ah;
@@ -1976,6 +947,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
1976 /* Stop ANI */ 947 /* Stop ANI */
1977 del_timer_sync(&common->ani.timer); 948 del_timer_sync(&common->ani.timer);
1978 949
950 ieee80211_stop_queues(hw);
951
1979 ath9k_hw_set_interrupts(ah, 0); 952 ath9k_hw_set_interrupts(ah, 0);
1980 ath_drain_all_txq(sc, retry_tx); 953 ath_drain_all_txq(sc, retry_tx);
1981 ath_stoprecv(sc); 954 ath_stoprecv(sc);
@@ -2017,131 +990,14 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
2017 } 990 }
2018 } 991 }
2019 992
993 ieee80211_wake_queues(hw);
994
2020 /* Start ANI */ 995 /* Start ANI */
2021 ath_start_ani(common); 996 ath_start_ani(common);
2022 997
2023 return r; 998 return r;
2024} 999}
2025 1000
2026/*
2027 * This function will allocate both the DMA descriptor structure, and the
2028 * buffers it contains. These are used to contain the descriptors used
2029 * by the system.
2030*/
2031int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
2032 struct list_head *head, const char *name,
2033 int nbuf, int ndesc)
2034{
2035#define DS2PHYS(_dd, _ds) \
2036 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2037#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
2038#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
2039 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2040 struct ath_desc *ds;
2041 struct ath_buf *bf;
2042 int i, bsize, error;
2043
2044 ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
2045 name, nbuf, ndesc);
2046
2047 INIT_LIST_HEAD(head);
2048 /* ath_desc must be a multiple of DWORDs */
2049 if ((sizeof(struct ath_desc) % 4) != 0) {
2050 ath_print(common, ATH_DBG_FATAL,
2051 "ath_desc not DWORD aligned\n");
2052 BUG_ON((sizeof(struct ath_desc) % 4) != 0);
2053 error = -ENOMEM;
2054 goto fail;
2055 }
2056
2057 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
2058
2059 /*
2060 * Need additional DMA memory because we can't use
2061 * descriptors that cross the 4K page boundary. Assume
2062 * one skipped descriptor per 4K page.
2063 */
2064 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
2065 u32 ndesc_skipped =
2066 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
2067 u32 dma_len;
2068
2069 while (ndesc_skipped) {
2070 dma_len = ndesc_skipped * sizeof(struct ath_desc);
2071 dd->dd_desc_len += dma_len;
2072
2073 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
2074 };
2075 }
2076
2077 /* allocate descriptors */
2078 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2079 &dd->dd_desc_paddr, GFP_KERNEL);
2080 if (dd->dd_desc == NULL) {
2081 error = -ENOMEM;
2082 goto fail;
2083 }
2084 ds = dd->dd_desc;
2085 ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
2086 name, ds, (u32) dd->dd_desc_len,
2087 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
2088
2089 /* allocate buffers */
2090 bsize = sizeof(struct ath_buf) * nbuf;
2091 bf = kzalloc(bsize, GFP_KERNEL);
2092 if (bf == NULL) {
2093 error = -ENOMEM;
2094 goto fail2;
2095 }
2096 dd->dd_bufptr = bf;
2097
2098 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
2099 bf->bf_desc = ds;
2100 bf->bf_daddr = DS2PHYS(dd, ds);
2101
2102 if (!(sc->sc_ah->caps.hw_caps &
2103 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
2104 /*
2105 * Skip descriptor addresses which can cause 4KB
2106 * boundary crossing (addr + length) with a 32 dword
2107 * descriptor fetch.
2108 */
2109 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
2110 BUG_ON((caddr_t) bf->bf_desc >=
2111 ((caddr_t) dd->dd_desc +
2112 dd->dd_desc_len));
2113
2114 ds += ndesc;
2115 bf->bf_desc = ds;
2116 bf->bf_daddr = DS2PHYS(dd, ds);
2117 }
2118 }
2119 list_add_tail(&bf->list, head);
2120 }
2121 return 0;
2122fail2:
2123 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2124 dd->dd_desc_paddr);
2125fail:
2126 memset(dd, 0, sizeof(*dd));
2127 return error;
2128#undef ATH_DESC_4KB_BOUND_CHECK
2129#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
2130#undef DS2PHYS
2131}
2132
2133void ath_descdma_cleanup(struct ath_softc *sc,
2134 struct ath_descdma *dd,
2135 struct list_head *head)
2136{
2137 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2138 dd->dd_desc_paddr);
2139
2140 INIT_LIST_HEAD(head);
2141 kfree(dd->dd_bufptr);
2142 memset(dd, 0, sizeof(*dd));
2143}
2144
2145int ath_get_hal_qnum(u16 queue, struct ath_softc *sc) 1001int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
2146{ 1002{
2147 int qnum; 1003 int qnum;
@@ -2220,28 +1076,6 @@ void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
2220/* mac80211 callbacks */ 1076/* mac80211 callbacks */
2221/**********************/ 1077/**********************/
2222 1078
2223/*
2224 * (Re)start btcoex timers
2225 */
2226static void ath9k_btcoex_timer_resume(struct ath_softc *sc)
2227{
2228 struct ath_btcoex *btcoex = &sc->btcoex;
2229 struct ath_hw *ah = sc->sc_ah;
2230
2231 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
2232 "Starting btcoex timers");
2233
2234 /* make sure duty cycle timer is also stopped when resuming */
2235 if (btcoex->hw_timer_enabled)
2236 ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
2237
2238 btcoex->bt_priority_cnt = 0;
2239 btcoex->bt_priority_time = jiffies;
2240 sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
2241
2242 mod_timer(&btcoex->period_timer, jiffies);
2243}
2244
2245static int ath9k_start(struct ieee80211_hw *hw) 1079static int ath9k_start(struct ieee80211_hw *hw)
2246{ 1080{
2247 struct ath_wiphy *aphy = hw->priv; 1081 struct ath_wiphy *aphy = hw->priv;
@@ -2411,11 +1245,11 @@ static int ath9k_tx(struct ieee80211_hw *hw,
2411 if (ieee80211_is_pspoll(hdr->frame_control)) { 1245 if (ieee80211_is_pspoll(hdr->frame_control)) {
2412 ath_print(common, ATH_DBG_PS, 1246 ath_print(common, ATH_DBG_PS,
2413 "Sending PS-Poll to pick a buffered frame\n"); 1247 "Sending PS-Poll to pick a buffered frame\n");
2414 sc->sc_flags |= SC_OP_WAIT_FOR_PSPOLL_DATA; 1248 sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA;
2415 } else { 1249 } else {
2416 ath_print(common, ATH_DBG_PS, 1250 ath_print(common, ATH_DBG_PS,
2417 "Wake up to complete TX\n"); 1251 "Wake up to complete TX\n");
2418 sc->sc_flags |= SC_OP_WAIT_FOR_TX_ACK; 1252 sc->ps_flags |= PS_WAIT_FOR_TX_ACK;
2419 } 1253 }
2420 /* 1254 /*
2421 * The actual restore operation will happen only after 1255 * The actual restore operation will happen only after
@@ -2468,22 +1302,6 @@ exit:
2468 return 0; 1302 return 0;
2469} 1303}
2470 1304
2471/*
2472 * Pause btcoex timer and bt duty cycle timer
2473 */
2474static void ath9k_btcoex_timer_pause(struct ath_softc *sc)
2475{
2476 struct ath_btcoex *btcoex = &sc->btcoex;
2477 struct ath_hw *ah = sc->sc_ah;
2478
2479 del_timer_sync(&btcoex->period_timer);
2480
2481 if (btcoex->hw_timer_enabled)
2482 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
2483
2484 btcoex->hw_timer_enabled = false;
2485}
2486
2487static void ath9k_stop(struct ieee80211_hw *hw) 1305static void ath9k_stop(struct ieee80211_hw *hw)
2488{ 1306{
2489 struct ath_wiphy *aphy = hw->priv; 1307 struct ath_wiphy *aphy = hw->priv;
@@ -2550,12 +1368,12 @@ static void ath9k_stop(struct ieee80211_hw *hw)
2550} 1368}
2551 1369
2552static int ath9k_add_interface(struct ieee80211_hw *hw, 1370static int ath9k_add_interface(struct ieee80211_hw *hw,
2553 struct ieee80211_if_init_conf *conf) 1371 struct ieee80211_vif *vif)
2554{ 1372{
2555 struct ath_wiphy *aphy = hw->priv; 1373 struct ath_wiphy *aphy = hw->priv;
2556 struct ath_softc *sc = aphy->sc; 1374 struct ath_softc *sc = aphy->sc;
2557 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1375 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2558 struct ath_vif *avp = (void *)conf->vif->drv_priv; 1376 struct ath_vif *avp = (void *)vif->drv_priv;
2559 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED; 1377 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
2560 int ret = 0; 1378 int ret = 0;
2561 1379
@@ -2567,7 +1385,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
2567 goto out; 1385 goto out;
2568 } 1386 }
2569 1387
2570 switch (conf->type) { 1388 switch (vif->type) {
2571 case NL80211_IFTYPE_STATION: 1389 case NL80211_IFTYPE_STATION:
2572 ic_opmode = NL80211_IFTYPE_STATION; 1390 ic_opmode = NL80211_IFTYPE_STATION;
2573 break; 1391 break;
@@ -2578,11 +1396,11 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
2578 ret = -ENOBUFS; 1396 ret = -ENOBUFS;
2579 goto out; 1397 goto out;
2580 } 1398 }
2581 ic_opmode = conf->type; 1399 ic_opmode = vif->type;
2582 break; 1400 break;
2583 default: 1401 default:
2584 ath_print(common, ATH_DBG_FATAL, 1402 ath_print(common, ATH_DBG_FATAL,
2585 "Interface type %d not yet supported\n", conf->type); 1403 "Interface type %d not yet supported\n", vif->type);
2586 ret = -EOPNOTSUPP; 1404 ret = -EOPNOTSUPP;
2587 goto out; 1405 goto out;
2588 } 1406 }
@@ -2614,18 +1432,18 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
2614 * Enable MIB interrupts when there are hardware phy counters. 1432 * Enable MIB interrupts when there are hardware phy counters.
2615 * Note we only do this (at the moment) for station mode. 1433 * Note we only do this (at the moment) for station mode.
2616 */ 1434 */
2617 if ((conf->type == NL80211_IFTYPE_STATION) || 1435 if ((vif->type == NL80211_IFTYPE_STATION) ||
2618 (conf->type == NL80211_IFTYPE_ADHOC) || 1436 (vif->type == NL80211_IFTYPE_ADHOC) ||
2619 (conf->type == NL80211_IFTYPE_MESH_POINT)) { 1437 (vif->type == NL80211_IFTYPE_MESH_POINT)) {
2620 sc->imask |= ATH9K_INT_MIB; 1438 sc->imask |= ATH9K_INT_MIB;
2621 sc->imask |= ATH9K_INT_TSFOOR; 1439 sc->imask |= ATH9K_INT_TSFOOR;
2622 } 1440 }
2623 1441
2624 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); 1442 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
2625 1443
2626 if (conf->type == NL80211_IFTYPE_AP || 1444 if (vif->type == NL80211_IFTYPE_AP ||
2627 conf->type == NL80211_IFTYPE_ADHOC || 1445 vif->type == NL80211_IFTYPE_ADHOC ||
2628 conf->type == NL80211_IFTYPE_MONITOR) 1446 vif->type == NL80211_IFTYPE_MONITOR)
2629 ath_start_ani(common); 1447 ath_start_ani(common);
2630 1448
2631out: 1449out:
@@ -2634,12 +1452,12 @@ out:
2634} 1452}
2635 1453
2636static void ath9k_remove_interface(struct ieee80211_hw *hw, 1454static void ath9k_remove_interface(struct ieee80211_hw *hw,
2637 struct ieee80211_if_init_conf *conf) 1455 struct ieee80211_vif *vif)
2638{ 1456{
2639 struct ath_wiphy *aphy = hw->priv; 1457 struct ath_wiphy *aphy = hw->priv;
2640 struct ath_softc *sc = aphy->sc; 1458 struct ath_softc *sc = aphy->sc;
2641 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1459 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2642 struct ath_vif *avp = (void *)conf->vif->drv_priv; 1460 struct ath_vif *avp = (void *)vif->drv_priv;
2643 int i; 1461 int i;
2644 1462
2645 ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n"); 1463 ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
@@ -2662,7 +1480,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
2662 sc->sc_flags &= ~SC_OP_BEACONS; 1480 sc->sc_flags &= ~SC_OP_BEACONS;
2663 1481
2664 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) { 1482 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
2665 if (sc->beacon.bslot[i] == conf->vif) { 1483 if (sc->beacon.bslot[i] == vif) {
2666 printk(KERN_DEBUG "%s: vif had allocated beacon " 1484 printk(KERN_DEBUG "%s: vif had allocated beacon "
2667 "slot\n", __func__); 1485 "slot\n", __func__);
2668 sc->beacon.bslot[i] = NULL; 1486 sc->beacon.bslot[i] = NULL;
@@ -2675,6 +1493,19 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
2675 mutex_unlock(&sc->mutex); 1493 mutex_unlock(&sc->mutex);
2676} 1494}
2677 1495
1496void ath9k_enable_ps(struct ath_softc *sc)
1497{
1498 sc->ps_enabled = true;
1499 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
1500 if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
1501 sc->imask |= ATH9K_INT_TIM_TIMER;
1502 ath9k_hw_set_interrupts(sc->sc_ah,
1503 sc->imask);
1504 }
1505 }
1506 ath9k_hw_setrxabort(sc->sc_ah, 1);
1507}
1508
2678static int ath9k_config(struct ieee80211_hw *hw, u32 changed) 1509static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2679{ 1510{
2680 struct ath_wiphy *aphy = hw->priv; 1511 struct ath_wiphy *aphy = hw->priv;
@@ -2713,6 +1544,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2713 spin_unlock_bh(&sc->wiphy_lock); 1544 spin_unlock_bh(&sc->wiphy_lock);
2714 1545
2715 if (enable_radio) { 1546 if (enable_radio) {
1547 sc->ps_idle = false;
2716 ath_radio_enable(sc, hw); 1548 ath_radio_enable(sc, hw);
2717 ath_print(common, ATH_DBG_CONFIG, 1549 ath_print(common, ATH_DBG_CONFIG,
2718 "not-idle: enabling radio\n"); 1550 "not-idle: enabling radio\n");
@@ -2727,36 +1559,27 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2727 */ 1559 */
2728 if (changed & IEEE80211_CONF_CHANGE_PS) { 1560 if (changed & IEEE80211_CONF_CHANGE_PS) {
2729 if (conf->flags & IEEE80211_CONF_PS) { 1561 if (conf->flags & IEEE80211_CONF_PS) {
2730 sc->sc_flags |= SC_OP_PS_ENABLED; 1562 sc->ps_flags |= PS_ENABLED;
2731 if (!(ah->caps.hw_caps &
2732 ATH9K_HW_CAP_AUTOSLEEP)) {
2733 if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
2734 sc->imask |= ATH9K_INT_TIM_TIMER;
2735 ath9k_hw_set_interrupts(sc->sc_ah,
2736 sc->imask);
2737 }
2738 }
2739 /* 1563 /*
2740 * At this point we know hardware has received an ACK 1564 * At this point we know hardware has received an ACK
2741 * of a previously sent null data frame. 1565 * of a previously sent null data frame.
2742 */ 1566 */
2743 if ((sc->sc_flags & SC_OP_NULLFUNC_COMPLETED)) { 1567 if ((sc->ps_flags & PS_NULLFUNC_COMPLETED)) {
2744 sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED; 1568 sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
2745 sc->ps_enabled = true; 1569 ath9k_enable_ps(sc);
2746 ath9k_hw_setrxabort(sc->sc_ah, 1);
2747 } 1570 }
2748 } else { 1571 } else {
2749 sc->ps_enabled = false; 1572 sc->ps_enabled = false;
2750 sc->sc_flags &= ~(SC_OP_PS_ENABLED | 1573 sc->ps_flags &= ~(PS_ENABLED |
2751 SC_OP_NULLFUNC_COMPLETED); 1574 PS_NULLFUNC_COMPLETED);
2752 ath9k_setpower(sc, ATH9K_PM_AWAKE); 1575 ath9k_setpower(sc, ATH9K_PM_AWAKE);
2753 if (!(ah->caps.hw_caps & 1576 if (!(ah->caps.hw_caps &
2754 ATH9K_HW_CAP_AUTOSLEEP)) { 1577 ATH9K_HW_CAP_AUTOSLEEP)) {
2755 ath9k_hw_setrxabort(sc->sc_ah, 0); 1578 ath9k_hw_setrxabort(sc->sc_ah, 0);
2756 sc->sc_flags &= ~(SC_OP_WAIT_FOR_BEACON | 1579 sc->ps_flags &= ~(PS_WAIT_FOR_BEACON |
2757 SC_OP_WAIT_FOR_CAB | 1580 PS_WAIT_FOR_CAB |
2758 SC_OP_WAIT_FOR_PSPOLL_DATA | 1581 PS_WAIT_FOR_PSPOLL_DATA |
2759 SC_OP_WAIT_FOR_TX_ACK); 1582 PS_WAIT_FOR_TX_ACK);
2760 if (sc->imask & ATH9K_INT_TIM_TIMER) { 1583 if (sc->imask & ATH9K_INT_TIM_TIMER) {
2761 sc->imask &= ~ATH9K_INT_TIM_TIMER; 1584 sc->imask &= ~ATH9K_INT_TIM_TIMER;
2762 ath9k_hw_set_interrupts(sc->sc_ah, 1585 ath9k_hw_set_interrupts(sc->sc_ah,
@@ -2766,6 +1589,14 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2766 } 1589 }
2767 } 1590 }
2768 1591
1592 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
1593 if (conf->flags & IEEE80211_CONF_MONITOR) {
1594 ath_print(common, ATH_DBG_CONFIG,
1595 "HW opmode set to Monitor mode\n");
1596 sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
1597 }
1598 }
1599
2769 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 1600 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2770 struct ieee80211_channel *curchan = hw->conf.channel; 1601 struct ieee80211_channel *curchan = hw->conf.channel;
2771 int pos = curchan->hw_value; 1602 int pos = curchan->hw_value;
@@ -2801,8 +1632,10 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2801 } 1632 }
2802 1633
2803skip_chan_change: 1634skip_chan_change:
2804 if (changed & IEEE80211_CONF_CHANGE_POWER) 1635 if (changed & IEEE80211_CONF_CHANGE_POWER) {
2805 sc->config.txpowlimit = 2 * conf->power_level; 1636 sc->config.txpowlimit = 2 * conf->power_level;
1637 ath_update_txpow(sc);
1638 }
2806 1639
2807 spin_lock_bh(&sc->wiphy_lock); 1640 spin_lock_bh(&sc->wiphy_lock);
2808 disable_radio = ath9k_all_wiphys_idle(sc); 1641 disable_radio = ath9k_all_wiphys_idle(sc);
@@ -2810,6 +1643,7 @@ skip_chan_change:
2810 1643
2811 if (disable_radio) { 1644 if (disable_radio) {
2812 ath_print(common, ATH_DBG_CONFIG, "idle: disabling radio\n"); 1645 ath_print(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
1646 sc->ps_idle = true;
2813 ath_radio_disable(sc, hw); 1647 ath_radio_disable(sc, hw);
2814 } 1648 }
2815 1649
@@ -2850,24 +1684,28 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
2850 "Set HW RX filter: 0x%x\n", rfilt); 1684 "Set HW RX filter: 0x%x\n", rfilt);
2851} 1685}
2852 1686
2853static void ath9k_sta_notify(struct ieee80211_hw *hw, 1687static int ath9k_sta_add(struct ieee80211_hw *hw,
2854 struct ieee80211_vif *vif, 1688 struct ieee80211_vif *vif,
2855 enum sta_notify_cmd cmd, 1689 struct ieee80211_sta *sta)
2856 struct ieee80211_sta *sta)
2857{ 1690{
2858 struct ath_wiphy *aphy = hw->priv; 1691 struct ath_wiphy *aphy = hw->priv;
2859 struct ath_softc *sc = aphy->sc; 1692 struct ath_softc *sc = aphy->sc;
2860 1693
2861 switch (cmd) { 1694 ath_node_attach(sc, sta);
2862 case STA_NOTIFY_ADD: 1695
2863 ath_node_attach(sc, sta); 1696 return 0;
2864 break; 1697}
2865 case STA_NOTIFY_REMOVE: 1698
2866 ath_node_detach(sc, sta); 1699static int ath9k_sta_remove(struct ieee80211_hw *hw,
2867 break; 1700 struct ieee80211_vif *vif,
2868 default: 1701 struct ieee80211_sta *sta)
2869 break; 1702{
2870 } 1703 struct ath_wiphy *aphy = hw->priv;
1704 struct ath_softc *sc = aphy->sc;
1705
1706 ath_node_detach(sc, sta);
1707
1708 return 0;
2871} 1709}
2872 1710
2873static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue, 1711static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
@@ -2966,6 +1804,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2966 struct ath_hw *ah = sc->sc_ah; 1804 struct ath_hw *ah = sc->sc_ah;
2967 struct ath_common *common = ath9k_hw_common(ah); 1805 struct ath_common *common = ath9k_hw_common(ah);
2968 struct ath_vif *avp = (void *)vif->drv_priv; 1806 struct ath_vif *avp = (void *)vif->drv_priv;
1807 int slottime;
2969 int error; 1808 int error;
2970 1809
2971 mutex_lock(&sc->mutex); 1810 mutex_lock(&sc->mutex);
@@ -3001,6 +1840,25 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
3001 ath_beacon_config(sc, vif); 1840 ath_beacon_config(sc, vif);
3002 } 1841 }
3003 1842
1843 if (changed & BSS_CHANGED_ERP_SLOT) {
1844 if (bss_conf->use_short_slot)
1845 slottime = 9;
1846 else
1847 slottime = 20;
1848 if (vif->type == NL80211_IFTYPE_AP) {
1849 /*
1850 * Defer update, so that connected stations can adjust
1851 * their settings at the same time.
1852 * See beacon.c for more details
1853 */
1854 sc->beacon.slottime = slottime;
1855 sc->beacon.updateslot = UPDATE;
1856 } else {
1857 ah->slottime = slottime;
1858 ath9k_hw_init_global_settings(ah);
1859 }
1860 }
1861
3004 /* Disable transmission of beacons */ 1862 /* Disable transmission of beacons */
3005 if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) 1863 if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon)
3006 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 1864 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
@@ -3133,6 +1991,7 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
3133{ 1991{
3134 struct ath_wiphy *aphy = hw->priv; 1992 struct ath_wiphy *aphy = hw->priv;
3135 struct ath_softc *sc = aphy->sc; 1993 struct ath_softc *sc = aphy->sc;
1994 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
3136 1995
3137 mutex_lock(&sc->mutex); 1996 mutex_lock(&sc->mutex);
3138 if (ath9k_wiphy_scanning(sc)) { 1997 if (ath9k_wiphy_scanning(sc)) {
@@ -3148,10 +2007,9 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
3148 2007
3149 aphy->state = ATH_WIPHY_SCAN; 2008 aphy->state = ATH_WIPHY_SCAN;
3150 ath9k_wiphy_pause_all_forced(sc, aphy); 2009 ath9k_wiphy_pause_all_forced(sc, aphy);
3151
3152 spin_lock_bh(&sc->ani_lock);
3153 sc->sc_flags |= SC_OP_SCANNING; 2010 sc->sc_flags |= SC_OP_SCANNING;
3154 spin_unlock_bh(&sc->ani_lock); 2011 del_timer_sync(&common->ani.timer);
2012 cancel_delayed_work_sync(&sc->tx_complete_work);
3155 mutex_unlock(&sc->mutex); 2013 mutex_unlock(&sc->mutex);
3156} 2014}
3157 2015
@@ -3159,17 +2017,30 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
3159{ 2017{
3160 struct ath_wiphy *aphy = hw->priv; 2018 struct ath_wiphy *aphy = hw->priv;
3161 struct ath_softc *sc = aphy->sc; 2019 struct ath_softc *sc = aphy->sc;
2020 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
3162 2021
3163 mutex_lock(&sc->mutex); 2022 mutex_lock(&sc->mutex);
3164 spin_lock_bh(&sc->ani_lock);
3165 aphy->state = ATH_WIPHY_ACTIVE; 2023 aphy->state = ATH_WIPHY_ACTIVE;
3166 sc->sc_flags &= ~SC_OP_SCANNING; 2024 sc->sc_flags &= ~SC_OP_SCANNING;
3167 sc->sc_flags |= SC_OP_FULL_RESET; 2025 sc->sc_flags |= SC_OP_FULL_RESET;
3168 spin_unlock_bh(&sc->ani_lock); 2026 ath_start_ani(common);
2027 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
3169 ath_beacon_config(sc, NULL); 2028 ath_beacon_config(sc, NULL);
3170 mutex_unlock(&sc->mutex); 2029 mutex_unlock(&sc->mutex);
3171} 2030}
3172 2031
2032static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
2033{
2034 struct ath_wiphy *aphy = hw->priv;
2035 struct ath_softc *sc = aphy->sc;
2036 struct ath_hw *ah = sc->sc_ah;
2037
2038 mutex_lock(&sc->mutex);
2039 ah->coverage_class = coverage_class;
2040 ath9k_hw_init_global_settings(ah);
2041 mutex_unlock(&sc->mutex);
2042}
2043
3173struct ieee80211_ops ath9k_ops = { 2044struct ieee80211_ops ath9k_ops = {
3174 .tx = ath9k_tx, 2045 .tx = ath9k_tx,
3175 .start = ath9k_start, 2046 .start = ath9k_start,
@@ -3178,7 +2049,8 @@ struct ieee80211_ops ath9k_ops = {
3178 .remove_interface = ath9k_remove_interface, 2049 .remove_interface = ath9k_remove_interface,
3179 .config = ath9k_config, 2050 .config = ath9k_config,
3180 .configure_filter = ath9k_configure_filter, 2051 .configure_filter = ath9k_configure_filter,
3181 .sta_notify = ath9k_sta_notify, 2052 .sta_add = ath9k_sta_add,
2053 .sta_remove = ath9k_sta_remove,
3182 .conf_tx = ath9k_conf_tx, 2054 .conf_tx = ath9k_conf_tx,
3183 .bss_info_changed = ath9k_bss_info_changed, 2055 .bss_info_changed = ath9k_bss_info_changed,
3184 .set_key = ath9k_set_key, 2056 .set_key = ath9k_set_key,
@@ -3189,64 +2061,5 @@ struct ieee80211_ops ath9k_ops = {
3189 .sw_scan_start = ath9k_sw_scan_start, 2061 .sw_scan_start = ath9k_sw_scan_start,
3190 .sw_scan_complete = ath9k_sw_scan_complete, 2062 .sw_scan_complete = ath9k_sw_scan_complete,
3191 .rfkill_poll = ath9k_rfkill_poll_state, 2063 .rfkill_poll = ath9k_rfkill_poll_state,
2064 .set_coverage_class = ath9k_set_coverage_class,
3192}; 2065};
3193
3194static int __init ath9k_init(void)
3195{
3196 int error;
3197
3198 /* Register rate control algorithm */
3199 error = ath_rate_control_register();
3200 if (error != 0) {
3201 printk(KERN_ERR
3202 "ath9k: Unable to register rate control "
3203 "algorithm: %d\n",
3204 error);
3205 goto err_out;
3206 }
3207
3208 error = ath9k_debug_create_root();
3209 if (error) {
3210 printk(KERN_ERR
3211 "ath9k: Unable to create debugfs root: %d\n",
3212 error);
3213 goto err_rate_unregister;
3214 }
3215
3216 error = ath_pci_init();
3217 if (error < 0) {
3218 printk(KERN_ERR
3219 "ath9k: No PCI devices found, driver not installed.\n");
3220 error = -ENODEV;
3221 goto err_remove_root;
3222 }
3223
3224 error = ath_ahb_init();
3225 if (error < 0) {
3226 error = -ENODEV;
3227 goto err_pci_exit;
3228 }
3229
3230 return 0;
3231
3232 err_pci_exit:
3233 ath_pci_exit();
3234
3235 err_remove_root:
3236 ath9k_debug_remove_root();
3237 err_rate_unregister:
3238 ath_rate_control_unregister();
3239 err_out:
3240 return error;
3241}
3242module_init(ath9k_init);
3243
3244static void __exit ath9k_exit(void)
3245{
3246 ath_ahb_exit();
3247 ath_pci_exit();
3248 ath9k_debug_remove_root();
3249 ath_rate_control_unregister();
3250 printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
3251}
3252module_exit(ath9k_exit);
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index f7af5ea54753..9441c6718a30 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -18,13 +18,14 @@
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include "ath9k.h" 19#include "ath9k.h"
20 20
21static struct pci_device_id ath_pci_id_table[] __devinitdata = { 21static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
22 { PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */ 22 { PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */
23 { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */ 23 { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
24 { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */ 24 { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */
25 { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */ 25 { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
26 { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */ 26 { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
27 { PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */ 27 { PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */
28 { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
28 { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */ 29 { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
29 { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */ 30 { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
30 { 0 } 31 { 0 }
@@ -49,16 +50,6 @@ static void ath_pci_read_cachesize(struct ath_common *common, int *csz)
49 *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */ 50 *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
50} 51}
51 52
52static void ath_pci_cleanup(struct ath_common *common)
53{
54 struct ath_softc *sc = (struct ath_softc *) common->priv;
55 struct pci_dev *pdev = to_pci_dev(sc->dev);
56
57 pci_iounmap(pdev, sc->mem);
58 pci_disable_device(pdev);
59 pci_release_region(pdev, 0);
60}
61
62static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data) 53static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
63{ 54{
64 struct ath_hw *ah = (struct ath_hw *) common->ah; 55 struct ath_hw *ah = (struct ath_hw *) common->ah;
@@ -98,7 +89,6 @@ static void ath_pci_bt_coex_prep(struct ath_common *common)
98 89
99static const struct ath_bus_ops ath_pci_bus_ops = { 90static const struct ath_bus_ops ath_pci_bus_ops = {
100 .read_cachesize = ath_pci_read_cachesize, 91 .read_cachesize = ath_pci_read_cachesize,
101 .cleanup = ath_pci_cleanup,
102 .eeprom_read = ath_pci_eeprom_read, 92 .eeprom_read = ath_pci_eeprom_read,
103 .bt_coex_prep = ath_pci_bt_coex_prep, 93 .bt_coex_prep = ath_pci_bt_coex_prep,
104}; 94};
@@ -113,25 +103,22 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
113 u16 subsysid; 103 u16 subsysid;
114 u32 val; 104 u32 val;
115 int ret = 0; 105 int ret = 0;
116 struct ath_hw *ah;
117 char hw_name[64]; 106 char hw_name[64];
118 107
119 if (pci_enable_device(pdev)) 108 if (pci_enable_device(pdev))
120 return -EIO; 109 return -EIO;
121 110
122 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 111 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
123
124 if (ret) { 112 if (ret) {
125 printk(KERN_ERR "ath9k: 32-bit DMA not available\n"); 113 printk(KERN_ERR "ath9k: 32-bit DMA not available\n");
126 goto bad; 114 goto err_dma;
127 } 115 }
128 116
129 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 117 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
130
131 if (ret) { 118 if (ret) {
132 printk(KERN_ERR "ath9k: 32-bit DMA consistent " 119 printk(KERN_ERR "ath9k: 32-bit DMA consistent "
133 "DMA enable failed\n"); 120 "DMA enable failed\n");
134 goto bad; 121 goto err_dma;
135 } 122 }
136 123
137 /* 124 /*
@@ -171,22 +158,22 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
171 if (ret) { 158 if (ret) {
172 dev_err(&pdev->dev, "PCI memory region reserve error\n"); 159 dev_err(&pdev->dev, "PCI memory region reserve error\n");
173 ret = -ENODEV; 160 ret = -ENODEV;
174 goto bad; 161 goto err_region;
175 } 162 }
176 163
177 mem = pci_iomap(pdev, 0, 0); 164 mem = pci_iomap(pdev, 0, 0);
178 if (!mem) { 165 if (!mem) {
179 printk(KERN_ERR "PCI memory map error\n") ; 166 printk(KERN_ERR "PCI memory map error\n") ;
180 ret = -EIO; 167 ret = -EIO;
181 goto bad1; 168 goto err_iomap;
182 } 169 }
183 170
184 hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) + 171 hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) +
185 sizeof(struct ath_softc), &ath9k_ops); 172 sizeof(struct ath_softc), &ath9k_ops);
186 if (!hw) { 173 if (!hw) {
187 dev_err(&pdev->dev, "no memory for ieee80211_hw\n"); 174 dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
188 ret = -ENOMEM; 175 ret = -ENOMEM;
189 goto bad2; 176 goto err_alloc_hw;
190 } 177 }
191 178
192 SET_IEEE80211_DEV(hw, &pdev->dev); 179 SET_IEEE80211_DEV(hw, &pdev->dev);
@@ -201,25 +188,25 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
201 sc->dev = &pdev->dev; 188 sc->dev = &pdev->dev;
202 sc->mem = mem; 189 sc->mem = mem;
203 190
204 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid); 191 /* Will be cleared in ath9k_start() */
205 ret = ath_init_device(id->device, sc, subsysid, &ath_pci_bus_ops); 192 sc->sc_flags |= SC_OP_INVALID;
206 if (ret) {
207 dev_err(&pdev->dev, "failed to initialize device\n");
208 goto bad3;
209 }
210
211 /* setup interrupt service routine */
212 193
213 ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc); 194 ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
214 if (ret) { 195 if (ret) {
215 dev_err(&pdev->dev, "request_irq failed\n"); 196 dev_err(&pdev->dev, "request_irq failed\n");
216 goto bad4; 197 goto err_irq;
217 } 198 }
218 199
219 sc->irq = pdev->irq; 200 sc->irq = pdev->irq;
220 201
221 ah = sc->sc_ah; 202 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid);
222 ath9k_hw_name(ah, hw_name, sizeof(hw_name)); 203 ret = ath9k_init_device(id->device, sc, subsysid, &ath_pci_bus_ops);
204 if (ret) {
205 dev_err(&pdev->dev, "Failed to initialize device\n");
206 goto err_init;
207 }
208
209 ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
223 printk(KERN_INFO 210 printk(KERN_INFO
224 "%s: %s mem=0x%lx, irq=%d\n", 211 "%s: %s mem=0x%lx, irq=%d\n",
225 wiphy_name(hw->wiphy), 212 wiphy_name(hw->wiphy),
@@ -227,15 +214,18 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
227 (unsigned long)mem, pdev->irq); 214 (unsigned long)mem, pdev->irq);
228 215
229 return 0; 216 return 0;
230bad4: 217
231 ath_detach(sc); 218err_init:
232bad3: 219 free_irq(sc->irq, sc);
220err_irq:
233 ieee80211_free_hw(hw); 221 ieee80211_free_hw(hw);
234bad2: 222err_alloc_hw:
235 pci_iounmap(pdev, mem); 223 pci_iounmap(pdev, mem);
236bad1: 224err_iomap:
237 pci_release_region(pdev, 0); 225 pci_release_region(pdev, 0);
238bad: 226err_region:
227 /* Nothing */
228err_dma:
239 pci_disable_device(pdev); 229 pci_disable_device(pdev);
240 return ret; 230 return ret;
241} 231}
@@ -245,8 +235,15 @@ static void ath_pci_remove(struct pci_dev *pdev)
245 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 235 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
246 struct ath_wiphy *aphy = hw->priv; 236 struct ath_wiphy *aphy = hw->priv;
247 struct ath_softc *sc = aphy->sc; 237 struct ath_softc *sc = aphy->sc;
238 void __iomem *mem = sc->mem;
239
240 ath9k_deinit_device(sc);
241 free_irq(sc->irq, sc);
242 ieee80211_free_hw(sc->hw);
248 243
249 ath_cleanup(sc); 244 pci_iounmap(pdev, mem);
245 pci_disable_device(pdev);
246 pci_release_region(pdev, 0);
250} 247}
251 248
252#ifdef CONFIG_PM 249#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index 31de27dc0c4a..0999a495fd46 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -384,6 +384,9 @@ bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
384 384
385#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0 385#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0
386 386
387#define AR_PHY_HEAVY_CLIP_FACTOR_RIFS 0x99EC
388#define AR_PHY_RIFS_INIT_DELAY 0x03ff0000
389
387#define AR_PHY_M_SLEEP 0x99f0 390#define AR_PHY_M_SLEEP 0x99f0
388#define AR_PHY_REFCLKDLY 0x99f4 391#define AR_PHY_REFCLKDLY 0x99f4
389#define AR_PHY_REFCLKPD 0x99f8 392#define AR_PHY_REFCLKPD 0x99f8
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 70fdb9d8db82..ac34a055c713 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -668,7 +668,7 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
668 struct ieee80211_tx_rate *rates = tx_info->control.rates; 668 struct ieee80211_tx_rate *rates = tx_info->control.rates;
669 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 669 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
670 __le16 fc = hdr->frame_control; 670 __le16 fc = hdr->frame_control;
671 u8 try_per_rate, i = 0, rix, nrix; 671 u8 try_per_rate, i = 0, rix;
672 int is_probe = 0; 672 int is_probe = 0;
673 673
674 if (rate_control_send_low(sta, priv_sta, txrc)) 674 if (rate_control_send_low(sta, priv_sta, txrc))
@@ -678,48 +678,47 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
678 * For Multi Rate Retry we use a different number of 678 * For Multi Rate Retry we use a different number of
679 * retry attempt counts. This ends up looking like this: 679 * retry attempt counts. This ends up looking like this:
680 * 680 *
681 * MRR[0] = 2 681 * MRR[0] = 4
682 * MRR[1] = 2 682 * MRR[1] = 4
683 * MRR[2] = 2 683 * MRR[2] = 4
684 * MRR[3] = 4 684 * MRR[3] = 8
685 * 685 *
686 */ 686 */
687 try_per_rate = sc->hw->max_rate_tries; 687 try_per_rate = 4;
688 688
689 rate_table = sc->cur_rate_table; 689 rate_table = sc->cur_rate_table;
690 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe); 690 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe);
691 nrix = rix;
692 691
693 if (is_probe) { 692 if (is_probe) {
694 /* set one try for probe rates. For the 693 /* set one try for probe rates. For the
695 * probes don't enable rts */ 694 * probes don't enable rts */
696 ath_rc_rate_set_series(rate_table, &rates[i++], txrc, 695 ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
697 1, nrix, 0); 696 1, rix, 0);
698 697
699 /* Get the next tried/allowed rate. No RTS for the next series 698 /* Get the next tried/allowed rate. No RTS for the next series
700 * after the probe rate 699 * after the probe rate
701 */ 700 */
702 ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &nrix); 701 ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
703 ath_rc_rate_set_series(rate_table, &rates[i++], txrc, 702 ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
704 try_per_rate, nrix, 0); 703 try_per_rate, rix, 0);
705 704
706 tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; 705 tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
707 } else { 706 } else {
708 /* Set the choosen rate. No RTS for first series entry. */ 707 /* Set the choosen rate. No RTS for first series entry. */
709 ath_rc_rate_set_series(rate_table, &rates[i++], txrc, 708 ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
710 try_per_rate, nrix, 0); 709 try_per_rate, rix, 0);
711 } 710 }
712 711
713 /* Fill in the other rates for multirate retry */ 712 /* Fill in the other rates for multirate retry */
714 for ( ; i < 4; i++) { 713 for ( ; i < 4; i++) {
715 /* Use twice the number of tries for the last MRR segment. */ 714 /* Use twice the number of tries for the last MRR segment. */
716 if (i + 1 == 4) 715 if (i + 1 == 4)
717 try_per_rate = 4; 716 try_per_rate = 8;
718 717
719 ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &nrix); 718 ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
720 /* All other rates in the series have RTS enabled */ 719 /* All other rates in the series have RTS enabled */
721 ath_rc_rate_set_series(rate_table, &rates[i], txrc, 720 ath_rc_rate_set_series(rate_table, &rates[i], txrc,
722 try_per_rate, nrix, 1); 721 try_per_rate, rix, 1);
723 } 722 }
724 723
725 /* 724 /*
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 9eb96f506998..4f6d6fd442f4 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -57,6 +57,10 @@ enum {
57 || (_phy == WLAN_RC_PHY_HT_40_DS) \ 57 || (_phy == WLAN_RC_PHY_HT_40_DS) \
58 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \ 58 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
59 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)) 59 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
60#define WLAN_RC_PHY_20(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS) \
61 || (_phy == WLAN_RC_PHY_HT_20_DS) \
62 || (_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
63 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI))
60#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \ 64#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \
61 || (_phy == WLAN_RC_PHY_HT_40_DS) \ 65 || (_phy == WLAN_RC_PHY_HT_40_DS) \
62 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \ 66 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 477365e5ae69..1ca42e5148c8 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -364,10 +364,10 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
364 if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) 364 if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
365 return; /* not from our current AP */ 365 return; /* not from our current AP */
366 366
367 sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON; 367 sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
368 368
369 if (sc->sc_flags & SC_OP_BEACON_SYNC) { 369 if (sc->ps_flags & PS_BEACON_SYNC) {
370 sc->sc_flags &= ~SC_OP_BEACON_SYNC; 370 sc->ps_flags &= ~PS_BEACON_SYNC;
371 ath_print(common, ATH_DBG_PS, 371 ath_print(common, ATH_DBG_PS,
372 "Reconfigure Beacon timers based on " 372 "Reconfigure Beacon timers based on "
373 "timestamp from the AP\n"); 373 "timestamp from the AP\n");
@@ -384,17 +384,17 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
384 */ 384 */
385 ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating " 385 ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
386 "buffered broadcast/multicast frame(s)\n"); 386 "buffered broadcast/multicast frame(s)\n");
387 sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON; 387 sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
388 return; 388 return;
389 } 389 }
390 390
391 if (sc->sc_flags & SC_OP_WAIT_FOR_CAB) { 391 if (sc->ps_flags & PS_WAIT_FOR_CAB) {
392 /* 392 /*
393 * This can happen if a broadcast frame is dropped or the AP 393 * This can happen if a broadcast frame is dropped or the AP
394 * fails to send a frame indicating that all CAB frames have 394 * fails to send a frame indicating that all CAB frames have
395 * been delivered. 395 * been delivered.
396 */ 396 */
397 sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB; 397 sc->ps_flags &= ~PS_WAIT_FOR_CAB;
398 ath_print(common, ATH_DBG_PS, 398 ath_print(common, ATH_DBG_PS,
399 "PS wait for CAB frames timed out\n"); 399 "PS wait for CAB frames timed out\n");
400 } 400 }
@@ -408,10 +408,10 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
408 hdr = (struct ieee80211_hdr *)skb->data; 408 hdr = (struct ieee80211_hdr *)skb->data;
409 409
410 /* Process Beacon and CAB receive in PS state */ 410 /* Process Beacon and CAB receive in PS state */
411 if ((sc->sc_flags & SC_OP_WAIT_FOR_BEACON) && 411 if ((sc->ps_flags & PS_WAIT_FOR_BEACON) &&
412 ieee80211_is_beacon(hdr->frame_control)) 412 ieee80211_is_beacon(hdr->frame_control))
413 ath_rx_ps_beacon(sc, skb); 413 ath_rx_ps_beacon(sc, skb);
414 else if ((sc->sc_flags & SC_OP_WAIT_FOR_CAB) && 414 else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
415 (ieee80211_is_data(hdr->frame_control) || 415 (ieee80211_is_data(hdr->frame_control) ||
416 ieee80211_is_action(hdr->frame_control)) && 416 ieee80211_is_action(hdr->frame_control)) &&
417 is_multicast_ether_addr(hdr->addr1) && 417 is_multicast_ether_addr(hdr->addr1) &&
@@ -420,20 +420,20 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
420 * No more broadcast/multicast frames to be received at this 420 * No more broadcast/multicast frames to be received at this
421 * point. 421 * point.
422 */ 422 */
423 sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB; 423 sc->ps_flags &= ~PS_WAIT_FOR_CAB;
424 ath_print(common, ATH_DBG_PS, 424 ath_print(common, ATH_DBG_PS,
425 "All PS CAB frames received, back to sleep\n"); 425 "All PS CAB frames received, back to sleep\n");
426 } else if ((sc->sc_flags & SC_OP_WAIT_FOR_PSPOLL_DATA) && 426 } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
427 !is_multicast_ether_addr(hdr->addr1) && 427 !is_multicast_ether_addr(hdr->addr1) &&
428 !ieee80211_has_morefrags(hdr->frame_control)) { 428 !ieee80211_has_morefrags(hdr->frame_control)) {
429 sc->sc_flags &= ~SC_OP_WAIT_FOR_PSPOLL_DATA; 429 sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
430 ath_print(common, ATH_DBG_PS, 430 ath_print(common, ATH_DBG_PS,
431 "Going back to sleep after having received " 431 "Going back to sleep after having received "
432 "PS-Poll data (0x%x)\n", 432 "PS-Poll data (0x%lx)\n",
433 sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | 433 sc->ps_flags & (PS_WAIT_FOR_BEACON |
434 SC_OP_WAIT_FOR_CAB | 434 PS_WAIT_FOR_CAB |
435 SC_OP_WAIT_FOR_PSPOLL_DATA | 435 PS_WAIT_FOR_PSPOLL_DATA |
436 SC_OP_WAIT_FOR_TX_ACK)); 436 PS_WAIT_FOR_TX_ACK));
437 } 437 }
438} 438}
439 439
@@ -571,6 +571,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
571 hw = ath_get_virt_hw(sc, hdr); 571 hw = ath_get_virt_hw(sc, hdr);
572 rx_stats = &ds->ds_rxstat; 572 rx_stats = &ds->ds_rxstat;
573 573
574 ath_debug_stat_rx(sc, bf);
575
574 /* 576 /*
575 * If we're asked to flush receive queue, directly 577 * If we're asked to flush receive queue, directly
576 * chain it back at the queue without processing it. 578 * chain it back at the queue without processing it.
@@ -631,9 +633,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
631 sc->rx.rxotherant = 0; 633 sc->rx.rxotherant = 0;
632 } 634 }
633 635
634 if (unlikely(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | 636 if (unlikely(sc->ps_flags & (PS_WAIT_FOR_BEACON |
635 SC_OP_WAIT_FOR_CAB | 637 PS_WAIT_FOR_CAB |
636 SC_OP_WAIT_FOR_PSPOLL_DATA))) 638 PS_WAIT_FOR_PSPOLL_DATA)))
637 ath_rx_ps(sc, skb); 639 ath_rx_ps(sc, skb);
638 640
639 ath_rx_send_to_mac80211(hw, sc, skb, rxs); 641 ath_rx_send_to_mac80211(hw, sc, skb, rxs);
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 8e653fb937a1..72cfa8ebd9ae 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -1547,9 +1547,9 @@ enum {
1547 1547
1548#define AR_BT_COEX_WEIGHT 0x8174 1548#define AR_BT_COEX_WEIGHT 0x8174
1549#define AR_BT_COEX_WGHT 0xff55 1549#define AR_BT_COEX_WGHT 0xff55
1550#define AR_STOMP_ALL_WLAN_WGHT 0xffcc 1550#define AR_STOMP_ALL_WLAN_WGHT 0xfcfc
1551#define AR_STOMP_LOW_WLAN_WGHT 0xaaa8 1551#define AR_STOMP_LOW_WLAN_WGHT 0xa8a8
1552#define AR_STOMP_NONE_WLAN_WGHT 0xaa00 1552#define AR_STOMP_NONE_WLAN_WGHT 0x0000
1553#define AR_BTCOEX_BT_WGHT 0x0000ffff 1553#define AR_BTCOEX_BT_WGHT 0x0000ffff
1554#define AR_BTCOEX_BT_WGHT_S 0 1554#define AR_BTCOEX_BT_WGHT_S 0
1555#define AR_BTCOEX_WL_WGHT 0xffff0000 1555#define AR_BTCOEX_WL_WGHT 0xffff0000
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index cd26caaf44e7..a43fbf84dab9 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -152,7 +152,7 @@ int ath9k_wiphy_add(struct ath_softc *sc)
152 152
153 SET_IEEE80211_PERM_ADDR(hw, addr); 153 SET_IEEE80211_PERM_ADDR(hw, addr);
154 154
155 ath_set_hw_capab(sc, hw); 155 ath9k_set_hw_capab(sc, hw);
156 156
157 error = ieee80211_register_hw(hw); 157 error = ieee80211_register_hw(hw);
158 158
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 29bf33692f71..47294f90bbe5 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1498,26 +1498,6 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1498 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT) 1498 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
1499 ctsrate |= rate->hw_value_short; 1499 ctsrate |= rate->hw_value_short;
1500 1500
1501 /*
1502 * ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive.
1503 * Check the first rate in the series to decide whether RTS/CTS
1504 * or CTS-to-self has to be used.
1505 */
1506 if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
1507 flags = ATH9K_TXDESC_CTSENA;
1508 else if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
1509 flags = ATH9K_TXDESC_RTSENA;
1510
1511 /* FIXME: Handle aggregation protection */
1512 if (sc->config.ath_aggr_prot &&
1513 (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
1514 flags = ATH9K_TXDESC_RTSENA;
1515 }
1516
1517 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1518 if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
1519 flags &= ~(ATH9K_TXDESC_RTSENA);
1520
1521 for (i = 0; i < 4; i++) { 1501 for (i = 0; i < 4; i++) {
1522 bool is_40, is_sgi, is_sp; 1502 bool is_40, is_sgi, is_sp;
1523 int phy; 1503 int phy;
@@ -1529,8 +1509,15 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1529 series[i].Tries = rates[i].count; 1509 series[i].Tries = rates[i].count;
1530 series[i].ChSel = common->tx_chainmask; 1510 series[i].ChSel = common->tx_chainmask;
1531 1511
1532 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) 1512 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1513 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
1533 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; 1514 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1515 flags |= ATH9K_TXDESC_RTSENA;
1516 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1517 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1518 flags |= ATH9K_TXDESC_CTSENA;
1519 }
1520
1534 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 1521 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1535 series[i].RateFlags |= ATH9K_RATESERIES_2040; 1522 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1536 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI) 1523 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
@@ -1568,6 +1555,14 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1568 phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp); 1555 phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
1569 } 1556 }
1570 1557
1558 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1559 if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
1560 flags &= ~ATH9K_TXDESC_RTSENA;
1561
1562 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1563 if (flags & ATH9K_TXDESC_RTSENA)
1564 flags &= ~ATH9K_TXDESC_CTSENA;
1565
1571 /* set dur_update_en for l-sig computation except for PS-Poll frames */ 1566 /* set dur_update_en for l-sig computation except for PS-Poll frames */
1572 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc, 1567 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1573 bf->bf_lastbf->bf_desc, 1568 bf->bf_lastbf->bf_desc,
@@ -1648,7 +1643,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1648 /* tag if this is a nullfunc frame to enable PS when AP acks it */ 1643 /* tag if this is a nullfunc frame to enable PS when AP acks it */
1649 if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) { 1644 if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
1650 bf->bf_isnullfunc = true; 1645 bf->bf_isnullfunc = true;
1651 sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED; 1646 sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
1652 } else 1647 } else
1653 bf->bf_isnullfunc = false; 1648 bf->bf_isnullfunc = false;
1654 1649
@@ -1858,15 +1853,15 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1858 skb_pull(skb, padsize); 1853 skb_pull(skb, padsize);
1859 } 1854 }
1860 1855
1861 if (sc->sc_flags & SC_OP_WAIT_FOR_TX_ACK) { 1856 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1862 sc->sc_flags &= ~SC_OP_WAIT_FOR_TX_ACK; 1857 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
1863 ath_print(common, ATH_DBG_PS, 1858 ath_print(common, ATH_DBG_PS,
1864 "Going back to sleep after having " 1859 "Going back to sleep after having "
1865 "received TX status (0x%x)\n", 1860 "received TX status (0x%lx)\n",
1866 sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | 1861 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1867 SC_OP_WAIT_FOR_CAB | 1862 PS_WAIT_FOR_CAB |
1868 SC_OP_WAIT_FOR_PSPOLL_DATA | 1863 PS_WAIT_FOR_PSPOLL_DATA |
1869 SC_OP_WAIT_FOR_TX_ACK)); 1864 PS_WAIT_FOR_TX_ACK));
1870 } 1865 }
1871 1866
1872 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL)) 1867 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
@@ -2053,11 +2048,10 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2053 */ 2048 */
2054 if (bf->bf_isnullfunc && 2049 if (bf->bf_isnullfunc &&
2055 (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) { 2050 (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
2056 if ((sc->sc_flags & SC_OP_PS_ENABLED)) { 2051 if ((sc->ps_flags & PS_ENABLED))
2057 sc->ps_enabled = true; 2052 ath9k_enable_ps(sc);
2058 ath9k_hw_setrxabort(sc->sc_ah, 1); 2053 else
2059 } else 2054 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
2060 sc->sc_flags |= SC_OP_NULLFUNC_COMPLETED;
2061 } 2055 }
2062 2056
2063 /* 2057 /*
diff --git a/drivers/net/wireless/ath/debug.h b/drivers/net/wireless/ath/debug.h
index d6b685a06c5e..8263633c003c 100644
--- a/drivers/net/wireless/ath/debug.h
+++ b/drivers/net/wireless/ath/debug.h
@@ -65,11 +65,11 @@ enum ATH_DEBUG {
65#define ATH_DBG_DEFAULT (ATH_DBG_FATAL) 65#define ATH_DBG_DEFAULT (ATH_DBG_FATAL)
66 66
67#ifdef CONFIG_ATH_DEBUG 67#ifdef CONFIG_ATH_DEBUG
68void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...); 68void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
69 __attribute__ ((format (printf, 3, 4)));
69#else 70#else
70static inline void ath_print(struct ath_common *common, 71static inline void __attribute__ ((format (printf, 3, 4)))
71 int dbg_mask, 72ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
72 const char *fmt, ...)
73{ 73{
74} 74}
75#endif /* CONFIG_ATH_DEBUG */ 75#endif /* CONFIG_ATH_DEBUG */
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 039ac490465c..04abd1f556b7 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -110,8 +110,9 @@ static const struct ieee80211_regdomain ath_world_regdom_67_68_6A = {
110 110
111static inline bool is_wwr_sku(u16 regd) 111static inline bool is_wwr_sku(u16 regd)
112{ 112{
113 return ((regd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX) || 113 return ((regd & COUNTRY_ERD_FLAG) != COUNTRY_ERD_FLAG) &&
114 (regd == WORLD); 114 (((regd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX) ||
115 (regd == WORLD));
115} 116}
116 117
117static u16 ath_regd_get_eepromRD(struct ath_regulatory *reg) 118static u16 ath_regd_get_eepromRD(struct ath_regulatory *reg)
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c
index 92f87fbe750f..9ab1192004c0 100644
--- a/drivers/net/wireless/atmel_pci.c
+++ b/drivers/net/wireless/atmel_pci.c
@@ -31,7 +31,7 @@ MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.")
31MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
32MODULE_SUPPORTED_DEVICE("Atmel at76c506 PCI wireless cards"); 32MODULE_SUPPORTED_DEVICE("Atmel at76c506 PCI wireless cards");
33 33
34static struct pci_device_id card_ids[] = { 34static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
35 { 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID }, 35 { 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID },
36 { 0, } 36 { 0, }
37}; 37};
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 64c12e1bced3..0a00d42642cd 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -78,11 +78,11 @@ config B43_SDIO
78 78
79 If unsure, say N. 79 If unsure, say N.
80 80
81# Data transfers to the device via PIO 81#Data transfers to the device via PIO. We want it as a fallback even
82# This is only needed on PCMCIA and SDIO devices. All others can do DMA properly. 82# if we can do DMA.
83config B43_PIO 83config B43_PIO
84 bool 84 bool
85 depends on B43 && (B43_SDIO || B43_PCMCIA || B43_FORCE_PIO) 85 depends on B43
86 select SSB_BLOCKIO 86 select SSB_BLOCKIO
87 default y 87 default y
88 88
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 84772a2542dc..5e83b6f0a3a0 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -12,7 +12,7 @@ b43-y += xmit.o
12b43-y += lo.o 12b43-y += lo.o
13b43-y += wa.o 13b43-y += wa.o
14b43-y += dma.o 14b43-y += dma.o
15b43-$(CONFIG_B43_PIO) += pio.o 15b43-y += pio.o
16b43-y += rfkill.o 16b43-y += rfkill.o
17b43-$(CONFIG_B43_LEDS) += leds.o 17b43-$(CONFIG_B43_LEDS) += leds.o
18b43-$(CONFIG_B43_PCMCIA) += pcmcia.o 18b43-$(CONFIG_B43_PCMCIA) += pcmcia.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index c484cc253892..b8807fb12c92 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -254,6 +254,14 @@ enum {
254#define B43_SHM_SH_MAXBFRAMES 0x0080 /* Maximum number of frames in a burst */ 254#define B43_SHM_SH_MAXBFRAMES 0x0080 /* Maximum number of frames in a burst */
255#define B43_SHM_SH_SPUWKUP 0x0094 /* pre-wakeup for synth PU in us */ 255#define B43_SHM_SH_SPUWKUP 0x0094 /* pre-wakeup for synth PU in us */
256#define B43_SHM_SH_PRETBTT 0x0096 /* pre-TBTT in us */ 256#define B43_SHM_SH_PRETBTT 0x0096 /* pre-TBTT in us */
257/* SHM_SHARED tx iq workarounds */
258#define B43_SHM_SH_NPHY_TXIQW0 0x0700
259#define B43_SHM_SH_NPHY_TXIQW1 0x0702
260#define B43_SHM_SH_NPHY_TXIQW2 0x0704
261#define B43_SHM_SH_NPHY_TXIQW3 0x0706
262/* SHM_SHARED tx pwr ctrl */
263#define B43_SHM_SH_NPHY_TXPWR_INDX0 0x0708
264#define B43_SHM_SH_NPHY_TXPWR_INDX1 0x070E
257 265
258/* SHM_SCRATCH offsets */ 266/* SHM_SCRATCH offsets */
259#define B43_SHM_SC_MINCONT 0x0003 /* Minimum contention window */ 267#define B43_SHM_SC_MINCONT 0x0003 /* Minimum contention window */
@@ -694,6 +702,7 @@ struct b43_wldev {
694 bool radio_hw_enable; /* saved state of radio hardware enabled state */ 702 bool radio_hw_enable; /* saved state of radio hardware enabled state */
695 bool qos_enabled; /* TRUE, if QoS is used. */ 703 bool qos_enabled; /* TRUE, if QoS is used. */
696 bool hwcrypto_enabled; /* TRUE, if HW crypto acceleration is enabled. */ 704 bool hwcrypto_enabled; /* TRUE, if HW crypto acceleration is enabled. */
705 bool use_pio; /* TRUE if next init should use PIO */
697 706
698 /* PHY/Radio device. */ 707 /* PHY/Radio device. */
699 struct b43_phy phy; 708 struct b43_phy phy;
@@ -822,11 +831,9 @@ struct b43_wl {
822 /* The device LEDs. */ 831 /* The device LEDs. */
823 struct b43_leds leds; 832 struct b43_leds leds;
824 833
825#ifdef CONFIG_B43_PIO
826 /* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */ 834 /* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */
827 u8 pio_scratchspace[110] __attribute__((__aligned__(8))); 835 u8 pio_scratchspace[110] __attribute__((__aligned__(8)));
828 u8 pio_tailspace[4] __attribute__((__aligned__(8))); 836 u8 pio_tailspace[4] __attribute__((__aligned__(8)));
829#endif /* CONFIG_B43_PIO */
830}; 837};
831 838
832static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw) 839static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw)
@@ -877,20 +884,15 @@ static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value)
877 884
878static inline bool b43_using_pio_transfers(struct b43_wldev *dev) 885static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
879{ 886{
880#ifdef CONFIG_B43_PIO
881 return dev->__using_pio_transfers; 887 return dev->__using_pio_transfers;
882#else
883 return 0;
884#endif
885} 888}
886 889
887#ifdef CONFIG_B43_FORCE_PIO 890#ifdef CONFIG_B43_FORCE_PIO
888# define B43_FORCE_PIO 1 891# define B43_PIO_DEFAULT 1
889#else 892#else
890# define B43_FORCE_PIO 0 893# define B43_PIO_DEFAULT 0
891#endif 894#endif
892 895
893
894/* Message printing */ 896/* Message printing */
895void b43info(struct b43_wl *wl, const char *fmt, ...) 897void b43info(struct b43_wl *wl, const char *fmt, ...)
896 __attribute__ ((format(printf, 2, 3))); 898 __attribute__ ((format(printf, 2, 3)));
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 88d1fd02d40a..be7abf8916ad 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1369,7 +1369,6 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
1369 b43err(dev->wl, "DMA tx mapping failure\n"); 1369 b43err(dev->wl, "DMA tx mapping failure\n");
1370 goto out; 1370 goto out;
1371 } 1371 }
1372 ring->nr_tx_packets++;
1373 if ((free_slots(ring) < TX_SLOTS_PER_FRAME) || 1372 if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
1374 should_inject_overflow(ring)) { 1373 should_inject_overflow(ring)) {
1375 /* This TX ring is full. */ 1374 /* This TX ring is full. */
@@ -1500,22 +1499,6 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
1500 } 1499 }
1501} 1500}
1502 1501
1503void b43_dma_get_tx_stats(struct b43_wldev *dev,
1504 struct ieee80211_tx_queue_stats *stats)
1505{
1506 const int nr_queues = dev->wl->hw->queues;
1507 struct b43_dmaring *ring;
1508 int i;
1509
1510 for (i = 0; i < nr_queues; i++) {
1511 ring = select_ring_by_priority(dev, i);
1512
1513 stats[i].len = ring->used_slots / TX_SLOTS_PER_FRAME;
1514 stats[i].limit = ring->nr_slots / TX_SLOTS_PER_FRAME;
1515 stats[i].count = ring->nr_tx_packets;
1516 }
1517}
1518
1519static void dma_rx(struct b43_dmaring *ring, int *slot) 1502static void dma_rx(struct b43_dmaring *ring, int *slot)
1520{ 1503{
1521 const struct b43_dma_ops *ops = ring->ops; 1504 const struct b43_dma_ops *ops = ring->ops;
@@ -1653,7 +1636,6 @@ void b43_dma_tx_resume(struct b43_wldev *dev)
1653 b43_power_saving_ctl_bits(dev, 0); 1636 b43_power_saving_ctl_bits(dev, 0);
1654} 1637}
1655 1638
1656#ifdef CONFIG_B43_PIO
1657static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type, 1639static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
1658 u16 mmio_base, bool enable) 1640 u16 mmio_base, bool enable)
1659{ 1641{
@@ -1687,4 +1669,3 @@ void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
1687 mmio_base = b43_dmacontroller_base(type, engine_index); 1669 mmio_base = b43_dmacontroller_base(type, engine_index);
1688 direct_fifo_rx(dev, type, mmio_base, enable); 1670 direct_fifo_rx(dev, type, mmio_base, enable);
1689} 1671}
1690#endif /* CONFIG_B43_PIO */
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index f7ab37c4cdbc..dc91944d6022 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -228,8 +228,6 @@ struct b43_dmaring {
228 int used_slots; 228 int used_slots;
229 /* Currently used slot in the ring. */ 229 /* Currently used slot in the ring. */
230 int current_slot; 230 int current_slot;
231 /* Total number of packets sent. Statistics only. */
232 unsigned int nr_tx_packets;
233 /* Frameoffset in octets. */ 231 /* Frameoffset in octets. */
234 u32 frameoffset; 232 u32 frameoffset;
235 /* Descriptor buffer size. */ 233 /* Descriptor buffer size. */
@@ -278,9 +276,6 @@ void b43_dma_free(struct b43_wldev *dev);
278void b43_dma_tx_suspend(struct b43_wldev *dev); 276void b43_dma_tx_suspend(struct b43_wldev *dev);
279void b43_dma_tx_resume(struct b43_wldev *dev); 277void b43_dma_tx_resume(struct b43_wldev *dev);
280 278
281void b43_dma_get_tx_stats(struct b43_wldev *dev,
282 struct ieee80211_tx_queue_stats *stats);
283
284int b43_dma_tx(struct b43_wldev *dev, 279int b43_dma_tx(struct b43_wldev *dev,
285 struct sk_buff *skb); 280 struct sk_buff *skb);
286void b43_dma_handle_txstatus(struct b43_wldev *dev, 281void b43_dma_handle_txstatus(struct b43_wldev *dev,
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 490fb45d1d05..1521b1e78d21 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -67,7 +67,12 @@ MODULE_AUTHOR("Gábor Stefanik");
67MODULE_LICENSE("GPL"); 67MODULE_LICENSE("GPL");
68 68
69MODULE_FIRMWARE(B43_SUPPORTED_FIRMWARE_ID); 69MODULE_FIRMWARE(B43_SUPPORTED_FIRMWARE_ID);
70 70MODULE_FIRMWARE("b43/ucode11.fw");
71MODULE_FIRMWARE("b43/ucode13.fw");
72MODULE_FIRMWARE("b43/ucode14.fw");
73MODULE_FIRMWARE("b43/ucode15.fw");
74MODULE_FIRMWARE("b43/ucode5.fw");
75MODULE_FIRMWARE("b43/ucode9.fw");
71 76
72static int modparam_bad_frames_preempt; 77static int modparam_bad_frames_preempt;
73module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444); 78module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444);
@@ -102,6 +107,9 @@ int b43_modparam_verbose = B43_VERBOSITY_DEFAULT;
102module_param_named(verbose, b43_modparam_verbose, int, 0644); 107module_param_named(verbose, b43_modparam_verbose, int, 0644);
103MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug"); 108MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug");
104 109
110int b43_modparam_pio = B43_PIO_DEFAULT;
111module_param_named(pio, b43_modparam_pio, int, 0644);
112MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
105 113
106static const struct ssb_device_id b43_ssb_tbl[] = { 114static const struct ssb_device_id b43_ssb_tbl[] = {
107 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5), 115 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5),
@@ -110,6 +118,7 @@ static const struct ssb_device_id b43_ssb_tbl[] = {
110 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 9), 118 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 9),
111 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 10), 119 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 10),
112 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 11), 120 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 11),
121 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 12),
113 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 13), 122 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 13),
114 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 15), 123 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 15),
115 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 16), 124 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 16),
@@ -842,8 +851,10 @@ static void rx_tkip_phase1_write(struct b43_wldev *dev, u8 index, u32 iv32,
842} 851}
843 852
844static void b43_op_update_tkip_key(struct ieee80211_hw *hw, 853static void b43_op_update_tkip_key(struct ieee80211_hw *hw,
845 struct ieee80211_key_conf *keyconf, const u8 *addr, 854 struct ieee80211_vif *vif,
846 u32 iv32, u16 *phase1key) 855 struct ieee80211_key_conf *keyconf,
856 struct ieee80211_sta *sta,
857 u32 iv32, u16 *phase1key)
847{ 858{
848 struct b43_wl *wl = hw_to_b43_wl(hw); 859 struct b43_wl *wl = hw_to_b43_wl(hw);
849 struct b43_wldev *dev; 860 struct b43_wldev *dev;
@@ -852,19 +863,19 @@ static void b43_op_update_tkip_key(struct ieee80211_hw *hw,
852 if (B43_WARN_ON(!modparam_hwtkip)) 863 if (B43_WARN_ON(!modparam_hwtkip))
853 return; 864 return;
854 865
855 mutex_lock(&wl->mutex); 866 /* This is only called from the RX path through mac80211, where
856 867 * our mutex is already locked. */
868 B43_WARN_ON(!mutex_is_locked(&wl->mutex));
857 dev = wl->current_dev; 869 dev = wl->current_dev;
858 if (!dev || b43_status(dev) < B43_STAT_INITIALIZED) 870 B43_WARN_ON(!dev || b43_status(dev) < B43_STAT_INITIALIZED);
859 goto out_unlock;
860 871
861 keymac_write(dev, index, NULL); /* First zero out mac to avoid race */ 872 keymac_write(dev, index, NULL); /* First zero out mac to avoid race */
862 873
863 rx_tkip_phase1_write(dev, index, iv32, phase1key); 874 rx_tkip_phase1_write(dev, index, iv32, phase1key);
864 keymac_write(dev, index, addr); 875 /* only pairwise TKIP keys are supported right now */
865 876 if (WARN_ON(!sta))
866out_unlock: 877 return;
867 mutex_unlock(&wl->mutex); 878 keymac_write(dev, index, sta->addr);
868} 879}
869 880
870static void do_key_write(struct b43_wldev *dev, 881static void do_key_write(struct b43_wldev *dev,
@@ -1793,8 +1804,9 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
1793 dma_reason[4], dma_reason[5]); 1804 dma_reason[4], dma_reason[5]);
1794 b43err(dev->wl, "This device does not support DMA " 1805 b43err(dev->wl, "This device does not support DMA "
1795 "on your system. Please use PIO instead.\n"); 1806 "on your system. Please use PIO instead.\n");
1796 b43err(dev->wl, "CONFIG_B43_FORCE_PIO must be set in " 1807 /* Fall back to PIO transfers if we get fatal DMA errors! */
1797 "your kernel configuration.\n"); 1808 dev->use_pio = 1;
1809 b43_controller_restart(dev, "DMA error");
1798 return; 1810 return;
1799 } 1811 }
1800 if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) { 1812 if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
@@ -3345,27 +3357,6 @@ out_unlock:
3345 return err; 3357 return err;
3346} 3358}
3347 3359
3348static int b43_op_get_tx_stats(struct ieee80211_hw *hw,
3349 struct ieee80211_tx_queue_stats *stats)
3350{
3351 struct b43_wl *wl = hw_to_b43_wl(hw);
3352 struct b43_wldev *dev;
3353 int err = -ENODEV;
3354
3355 mutex_lock(&wl->mutex);
3356 dev = wl->current_dev;
3357 if (dev && b43_status(dev) >= B43_STAT_STARTED) {
3358 if (b43_using_pio_transfers(dev))
3359 b43_pio_get_tx_stats(dev, stats);
3360 else
3361 b43_dma_get_tx_stats(dev, stats);
3362 err = 0;
3363 }
3364 mutex_unlock(&wl->mutex);
3365
3366 return err;
3367}
3368
3369static int b43_op_get_stats(struct ieee80211_hw *hw, 3360static int b43_op_get_stats(struct ieee80211_hw *hw,
3370 struct ieee80211_low_level_stats *stats) 3361 struct ieee80211_low_level_stats *stats)
3371{ 3362{
@@ -3569,6 +3560,12 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
3569 dev = wl->current_dev; 3560 dev = wl->current_dev;
3570 phy = &dev->phy; 3561 phy = &dev->phy;
3571 3562
3563 if (conf_is_ht(conf))
3564 phy->is_40mhz =
3565 (conf_is_ht40_minus(conf) || conf_is_ht40_plus(conf));
3566 else
3567 phy->is_40mhz = false;
3568
3572 b43_mac_suspend(dev); 3569 b43_mac_suspend(dev);
3573 3570
3574 if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) 3571 if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
@@ -3970,6 +3967,7 @@ static int b43_wireless_core_start(struct b43_wldev *dev)
3970 } 3967 }
3971 3968
3972 /* We are ready to run. */ 3969 /* We are ready to run. */
3970 ieee80211_wake_queues(dev->wl->hw);
3973 b43_set_status(dev, B43_STAT_STARTED); 3971 b43_set_status(dev, B43_STAT_STARTED);
3974 3972
3975 /* Start data flow (TX/RX). */ 3973 /* Start data flow (TX/RX). */
@@ -4360,7 +4358,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4360 4358
4361 if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) || 4359 if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) ||
4362 (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) || 4360 (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) ||
4363 B43_FORCE_PIO) { 4361 dev->use_pio) {
4364 dev->__using_pio_transfers = 1; 4362 dev->__using_pio_transfers = 1;
4365 err = b43_pio_init(dev); 4363 err = b43_pio_init(dev);
4366 } else { 4364 } else {
@@ -4379,8 +4377,6 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4379 4377
4380 ieee80211_wake_queues(dev->wl->hw); 4378 ieee80211_wake_queues(dev->wl->hw);
4381 4379
4382 ieee80211_wake_queues(dev->wl->hw);
4383
4384 b43_set_status(dev, B43_STAT_INITIALIZED); 4380 b43_set_status(dev, B43_STAT_INITIALIZED);
4385 4381
4386out: 4382out:
@@ -4395,7 +4391,7 @@ err_busdown:
4395} 4391}
4396 4392
4397static int b43_op_add_interface(struct ieee80211_hw *hw, 4393static int b43_op_add_interface(struct ieee80211_hw *hw,
4398 struct ieee80211_if_init_conf *conf) 4394 struct ieee80211_vif *vif)
4399{ 4395{
4400 struct b43_wl *wl = hw_to_b43_wl(hw); 4396 struct b43_wl *wl = hw_to_b43_wl(hw);
4401 struct b43_wldev *dev; 4397 struct b43_wldev *dev;
@@ -4403,24 +4399,24 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
4403 4399
4404 /* TODO: allow WDS/AP devices to coexist */ 4400 /* TODO: allow WDS/AP devices to coexist */
4405 4401
4406 if (conf->type != NL80211_IFTYPE_AP && 4402 if (vif->type != NL80211_IFTYPE_AP &&
4407 conf->type != NL80211_IFTYPE_MESH_POINT && 4403 vif->type != NL80211_IFTYPE_MESH_POINT &&
4408 conf->type != NL80211_IFTYPE_STATION && 4404 vif->type != NL80211_IFTYPE_STATION &&
4409 conf->type != NL80211_IFTYPE_WDS && 4405 vif->type != NL80211_IFTYPE_WDS &&
4410 conf->type != NL80211_IFTYPE_ADHOC) 4406 vif->type != NL80211_IFTYPE_ADHOC)
4411 return -EOPNOTSUPP; 4407 return -EOPNOTSUPP;
4412 4408
4413 mutex_lock(&wl->mutex); 4409 mutex_lock(&wl->mutex);
4414 if (wl->operating) 4410 if (wl->operating)
4415 goto out_mutex_unlock; 4411 goto out_mutex_unlock;
4416 4412
4417 b43dbg(wl, "Adding Interface type %d\n", conf->type); 4413 b43dbg(wl, "Adding Interface type %d\n", vif->type);
4418 4414
4419 dev = wl->current_dev; 4415 dev = wl->current_dev;
4420 wl->operating = 1; 4416 wl->operating = 1;
4421 wl->vif = conf->vif; 4417 wl->vif = vif;
4422 wl->if_type = conf->type; 4418 wl->if_type = vif->type;
4423 memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN); 4419 memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
4424 4420
4425 b43_adjust_opmode(dev); 4421 b43_adjust_opmode(dev);
4426 b43_set_pretbtt(dev); 4422 b43_set_pretbtt(dev);
@@ -4435,17 +4431,17 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
4435} 4431}
4436 4432
4437static void b43_op_remove_interface(struct ieee80211_hw *hw, 4433static void b43_op_remove_interface(struct ieee80211_hw *hw,
4438 struct ieee80211_if_init_conf *conf) 4434 struct ieee80211_vif *vif)
4439{ 4435{
4440 struct b43_wl *wl = hw_to_b43_wl(hw); 4436 struct b43_wl *wl = hw_to_b43_wl(hw);
4441 struct b43_wldev *dev = wl->current_dev; 4437 struct b43_wldev *dev = wl->current_dev;
4442 4438
4443 b43dbg(wl, "Removing Interface type %d\n", conf->type); 4439 b43dbg(wl, "Removing Interface type %d\n", vif->type);
4444 4440
4445 mutex_lock(&wl->mutex); 4441 mutex_lock(&wl->mutex);
4446 4442
4447 B43_WARN_ON(!wl->operating); 4443 B43_WARN_ON(!wl->operating);
4448 B43_WARN_ON(wl->vif != conf->vif); 4444 B43_WARN_ON(wl->vif != vif);
4449 wl->vif = NULL; 4445 wl->vif = NULL;
4450 4446
4451 wl->operating = 0; 4447 wl->operating = 0;
@@ -4586,7 +4582,6 @@ static const struct ieee80211_ops b43_hw_ops = {
4586 .set_key = b43_op_set_key, 4582 .set_key = b43_op_set_key,
4587 .update_tkip_key = b43_op_update_tkip_key, 4583 .update_tkip_key = b43_op_update_tkip_key,
4588 .get_stats = b43_op_get_stats, 4584 .get_stats = b43_op_get_stats,
4589 .get_tx_stats = b43_op_get_tx_stats,
4590 .get_tsf = b43_op_get_tsf, 4585 .get_tsf = b43_op_get_tsf,
4591 .set_tsf = b43_op_set_tsf, 4586 .set_tsf = b43_op_set_tsf,
4592 .start = b43_op_start, 4587 .start = b43_op_start,
@@ -4830,6 +4825,7 @@ static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl)
4830 if (!wldev) 4825 if (!wldev)
4831 goto out; 4826 goto out;
4832 4827
4828 wldev->use_pio = b43_modparam_pio;
4833 wldev->dev = dev; 4829 wldev->dev = dev;
4834 wldev->wl = wl; 4830 wldev->wl = wl;
4835 b43_set_status(wldev, B43_STAT_UNINIT); 4831 b43_set_status(wldev, B43_STAT_UNINIT);
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index 75b26e175e8f..8f7d7eff2d80 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -421,3 +421,48 @@ void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on)
421{ 421{
422 b43_write16(dev, B43_MMIO_PHY0, on ? 0 : 0xF4); 422 b43_write16(dev, B43_MMIO_PHY0, on ? 0 : 0xF4);
423} 423}
424
425/* http://bcm-v4.sipsolutions.net/802.11/PHY/Cordic */
426struct b43_c32 b43_cordic(int theta)
427{
428 u32 arctg[] = { 2949120, 1740967, 919879, 466945, 234379, 117304,
429 58666, 29335, 14668, 7334, 3667, 1833, 917, 458,
430 229, 115, 57, 29, };
431 u8 i;
432 s32 tmp;
433 s8 signx = 1;
434 u32 angle = 0;
435 struct b43_c32 ret = { .i = 39797, .q = 0, };
436
437 while (theta > (180 << 16))
438 theta -= (360 << 16);
439 while (theta < -(180 << 16))
440 theta += (360 << 16);
441
442 if (theta > (90 << 16)) {
443 theta -= (180 << 16);
444 signx = -1;
445 } else if (theta < -(90 << 16)) {
446 theta += (180 << 16);
447 signx = -1;
448 }
449
450 for (i = 0; i <= 17; i++) {
451 if (theta > angle) {
452 tmp = ret.i - (ret.q >> i);
453 ret.q += ret.i >> i;
454 ret.i = tmp;
455 angle += arctg[i];
456 } else {
457 tmp = ret.i + (ret.q >> i);
458 ret.q -= ret.i >> i;
459 ret.i = tmp;
460 angle -= arctg[i];
461 }
462 }
463
464 ret.i *= signx;
465 ret.q *= signx;
466
467 return ret;
468}
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index 9edd4e8e0c85..bd480b481bfc 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -5,6 +5,12 @@
5 5
6struct b43_wldev; 6struct b43_wldev;
7 7
8/* Complex number using 2 32-bit signed integers */
9struct b43_c32 { s32 i, q; };
10
11#define CORDIC_CONVERT(value) (((value) >= 0) ? \
12 ((((value) >> 15) + 1) >> 1) : \
13 -((((-(value)) >> 15) + 1) >> 1))
8 14
9/* PHY register routing bits */ 15/* PHY register routing bits */
10#define B43_PHYROUTE 0x0C00 /* PHY register routing bits mask */ 16#define B43_PHYROUTE 0x0C00 /* PHY register routing bits mask */
@@ -212,6 +218,9 @@ struct b43_phy {
212 bool supports_2ghz; 218 bool supports_2ghz;
213 bool supports_5ghz; 219 bool supports_5ghz;
214 220
221 /* HT info */
222 bool is_40mhz;
223
215 /* GMODE bit enabled? */ 224 /* GMODE bit enabled? */
216 bool gmode; 225 bool gmode;
217 226
@@ -418,5 +427,6 @@ int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset);
418 */ 427 */
419void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on); 428void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on);
420 429
430struct b43_c32 b43_cordic(int theta);
421 431
422#endif /* LINUX_B43_PHY_COMMON_H_ */ 432#endif /* LINUX_B43_PHY_COMMON_H_ */
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index 3e046ec1ff86..185219e0a552 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -80,6 +80,7 @@ static void b43_lpphy_op_free(struct b43_wldev *dev)
80 dev->phy.lp = NULL; 80 dev->phy.lp = NULL;
81} 81}
82 82
83/* http://bcm-v4.sipsolutions.net/802.11/PHY/LP/ReadBandSrom */
83static void lpphy_read_band_sprom(struct b43_wldev *dev) 84static void lpphy_read_band_sprom(struct b43_wldev *dev)
84{ 85{
85 struct b43_phy_lp *lpphy = dev->phy.lp; 86 struct b43_phy_lp *lpphy = dev->phy.lp;
@@ -101,6 +102,12 @@ static void lpphy_read_band_sprom(struct b43_wldev *dev)
101 maxpwr = bus->sprom.maxpwr_bg; 102 maxpwr = bus->sprom.maxpwr_bg;
102 lpphy->max_tx_pwr_med_band = maxpwr; 103 lpphy->max_tx_pwr_med_band = maxpwr;
103 cckpo = bus->sprom.cck2gpo; 104 cckpo = bus->sprom.cck2gpo;
105 /*
106 * We don't read SPROM's opo as specs say. On rev8 SPROMs
107 * opo == ofdm2gpo and we don't know any SSB with LP-PHY
108 * and SPROM rev below 8.
109 */
110 B43_WARN_ON(bus->sprom.revision < 8);
104 ofdmpo = bus->sprom.ofdm2gpo; 111 ofdmpo = bus->sprom.ofdm2gpo;
105 if (cckpo) { 112 if (cckpo) {
106 for (i = 0; i < 4; i++) { 113 for (i = 0; i < 4; i++) {
@@ -1703,19 +1710,6 @@ static const struct lpphy_rx_iq_comp lpphy_rev2plus_iq_comp = {
1703 .c0 = 0, 1710 .c0 = 0,
1704}; 1711};
1705 1712
1706static u8 lpphy_nbits(s32 val)
1707{
1708 u32 tmp = abs(val);
1709 u8 nbits = 0;
1710
1711 while (tmp != 0) {
1712 nbits++;
1713 tmp >>= 1;
1714 }
1715
1716 return nbits;
1717}
1718
1719static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples) 1713static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples)
1720{ 1714{
1721 struct lpphy_iq_est iq_est; 1715 struct lpphy_iq_est iq_est;
@@ -1742,8 +1736,8 @@ static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples)
1742 goto out; 1736 goto out;
1743 } 1737 }
1744 1738
1745 prod_msb = lpphy_nbits(prod); 1739 prod_msb = fls(abs(prod));
1746 q_msb = lpphy_nbits(qpwr); 1740 q_msb = fls(abs(qpwr));
1747 tmp1 = prod_msb - 20; 1741 tmp1 = prod_msb - 20;
1748 1742
1749 if (tmp1 >= 0) { 1743 if (tmp1 >= 0) {
@@ -1773,47 +1767,6 @@ out:
1773 return ret; 1767 return ret;
1774} 1768}
1775 1769
1776/* Complex number using 2 32-bit signed integers */
1777typedef struct {s32 i, q;} lpphy_c32;
1778
1779static lpphy_c32 lpphy_cordic(int theta)
1780{
1781 u32 arctg[] = { 2949120, 1740967, 919879, 466945, 234379, 117304,
1782 58666, 29335, 14668, 7334, 3667, 1833, 917, 458,
1783 229, 115, 57, 29, };
1784 int i, tmp, signx = 1, angle = 0;
1785 lpphy_c32 ret = { .i = 39797, .q = 0, };
1786
1787 theta = clamp_t(int, theta, -180, 180);
1788
1789 if (theta > 90) {
1790 theta -= 180;
1791 signx = -1;
1792 } else if (theta < -90) {
1793 theta += 180;
1794 signx = -1;
1795 }
1796
1797 for (i = 0; i <= 17; i++) {
1798 if (theta > angle) {
1799 tmp = ret.i - (ret.q >> i);
1800 ret.q += ret.i >> i;
1801 ret.i = tmp;
1802 angle += arctg[i];
1803 } else {
1804 tmp = ret.i + (ret.q >> i);
1805 ret.q -= ret.i >> i;
1806 ret.i = tmp;
1807 angle -= arctg[i];
1808 }
1809 }
1810
1811 ret.i *= signx;
1812 ret.q *= signx;
1813
1814 return ret;
1815}
1816
1817static void lpphy_run_samples(struct b43_wldev *dev, u16 samples, u16 loops, 1770static void lpphy_run_samples(struct b43_wldev *dev, u16 samples, u16 loops,
1818 u16 wait) 1771 u16 wait)
1819{ 1772{
@@ -1831,8 +1784,9 @@ static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max)
1831{ 1784{
1832 struct b43_phy_lp *lpphy = dev->phy.lp; 1785 struct b43_phy_lp *lpphy = dev->phy.lp;
1833 u16 buf[64]; 1786 u16 buf[64];
1834 int i, samples = 0, angle = 0, rotation = (9 * freq) / 500; 1787 int i, samples = 0, angle = 0;
1835 lpphy_c32 sample; 1788 int rotation = (((36 * freq) / 20) << 16) / 100;
1789 struct b43_c32 sample;
1836 1790
1837 lpphy->tx_tone_freq = freq; 1791 lpphy->tx_tone_freq = freq;
1838 1792
@@ -1848,10 +1802,10 @@ static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max)
1848 } 1802 }
1849 1803
1850 for (i = 0; i < samples; i++) { 1804 for (i = 0; i < samples; i++) {
1851 sample = lpphy_cordic(angle); 1805 sample = b43_cordic(angle);
1852 angle += rotation; 1806 angle += rotation;
1853 buf[i] = ((sample.i * max) & 0xFF) << 8; 1807 buf[i] = CORDIC_CONVERT((sample.i * max) & 0xFF) << 8;
1854 buf[i] |= (sample.q * max) & 0xFF; 1808 buf[i] |= CORDIC_CONVERT((sample.q * max) & 0xFF);
1855 } 1809 }
1856 1810
1857 b43_lptab_write_bulk(dev, B43_LPTAB16(5, 0), samples, buf); 1811 b43_lptab_write_bulk(dev, B43_LPTAB16(5, 0), samples, buf);
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 992318a78077..795bb1e3345d 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -28,7 +28,50 @@
28#include "b43.h" 28#include "b43.h"
29#include "phy_n.h" 29#include "phy_n.h"
30#include "tables_nphy.h" 30#include "tables_nphy.h"
31#include "main.h"
31 32
33struct nphy_txgains {
34 u16 txgm[2];
35 u16 pga[2];
36 u16 pad[2];
37 u16 ipa[2];
38};
39
40struct nphy_iqcal_params {
41 u16 txgm;
42 u16 pga;
43 u16 pad;
44 u16 ipa;
45 u16 cal_gain;
46 u16 ncorr[5];
47};
48
49struct nphy_iq_est {
50 s32 iq0_prod;
51 u32 i0_pwr;
52 u32 q0_pwr;
53 s32 iq1_prod;
54 u32 i1_pwr;
55 u32 q1_pwr;
56};
57
58enum b43_nphy_rf_sequence {
59 B43_RFSEQ_RX2TX,
60 B43_RFSEQ_TX2RX,
61 B43_RFSEQ_RESET2RX,
62 B43_RFSEQ_UPDATE_GAINH,
63 B43_RFSEQ_UPDATE_GAINL,
64 B43_RFSEQ_UPDATE_GAINU,
65};
66
67static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
68 u8 *events, u8 *delays, u8 length);
69static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
70 enum b43_nphy_rf_sequence seq);
71static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
72 u16 value, u8 core, bool off);
73static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
74 u16 value, u8 core);
32 75
33void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna) 76void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
34{//TODO 77{//TODO
@@ -197,173 +240,1020 @@ void b43_nphy_radio_turn_off(struct b43_wldev *dev)
197 ~B43_NPHY_RFCTL_CMD_EN); 240 ~B43_NPHY_RFCTL_CMD_EN);
198} 241}
199 242
200#define ntab_upload(dev, offset, data) do { \ 243/*
201 unsigned int i; \ 244 * Upload the N-PHY tables.
202 for (i = 0; i < (offset##_SIZE); i++) \ 245 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables
203 b43_ntab_write(dev, (offset) + i, (data)[i]); \ 246 */
204 } while (0)
205
206/* Upload the N-PHY tables. */
207static void b43_nphy_tables_init(struct b43_wldev *dev) 247static void b43_nphy_tables_init(struct b43_wldev *dev)
208{ 248{
209 /* Static tables */ 249 if (dev->phy.rev < 3)
210 ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct); 250 b43_nphy_rev0_1_2_tables_init(dev);
211 ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup); 251 else
212 ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap); 252 b43_nphy_rev3plus_tables_init(dev);
213 ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn); 253}
214 ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel); 254
215 ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot); 255/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */
216 ntab_upload(dev, B43_NTAB_PILOTLT, b43_ntab_pilotlt); 256static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
217 ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0); 257{
218 ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1); 258 struct b43_phy_n *nphy = dev->phy.n;
219 ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0); 259 enum ieee80211_band band;
220 ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1); 260 u16 tmp;
221 ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi); 261
222 ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest); 262 if (!enable) {
223 ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs); 263 nphy->rfctrl_intc1_save = b43_phy_read(dev,
224 264 B43_NPHY_RFCTL_INTC1);
225 /* Volatile tables */ 265 nphy->rfctrl_intc2_save = b43_phy_read(dev,
226 ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10); 266 B43_NPHY_RFCTL_INTC2);
227 ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11); 267 band = b43_current_band(dev->wl);
228 ntab_upload(dev, B43_NTAB_C0_ESTPLT, b43_ntab_estimatepowerlt0); 268 if (dev->phy.rev >= 3) {
229 ntab_upload(dev, B43_NTAB_C1_ESTPLT, b43_ntab_estimatepowerlt1); 269 if (band == IEEE80211_BAND_5GHZ)
230 ntab_upload(dev, B43_NTAB_C0_ADJPLT, b43_ntab_adjustpower0); 270 tmp = 0x600;
231 ntab_upload(dev, B43_NTAB_C1_ADJPLT, b43_ntab_adjustpower1); 271 else
232 ntab_upload(dev, B43_NTAB_C0_GAINCTL, b43_ntab_gainctl0); 272 tmp = 0x480;
233 ntab_upload(dev, B43_NTAB_C1_GAINCTL, b43_ntab_gainctl1); 273 } else {
234 ntab_upload(dev, B43_NTAB_C0_IQLT, b43_ntab_iqlt0); 274 if (band == IEEE80211_BAND_5GHZ)
235 ntab_upload(dev, B43_NTAB_C1_IQLT, b43_ntab_iqlt1); 275 tmp = 0x180;
236 ntab_upload(dev, B43_NTAB_C0_LOFEEDTH, b43_ntab_loftlt0); 276 else
237 ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1); 277 tmp = 0x120;
278 }
279 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
280 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
281 } else {
282 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1,
283 nphy->rfctrl_intc1_save);
284 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2,
285 nphy->rfctrl_intc2_save);
286 }
238} 287}
239 288
240static void b43_nphy_workarounds(struct b43_wldev *dev) 289/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */
290static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev)
291{
292 struct b43_phy_n *nphy = dev->phy.n;
293 u16 tmp;
294 enum ieee80211_band band = b43_current_band(dev->wl);
295 bool ipa = (nphy->ipa2g_on && band == IEEE80211_BAND_2GHZ) ||
296 (nphy->ipa5g_on && band == IEEE80211_BAND_5GHZ);
297
298 if (dev->phy.rev >= 3) {
299 if (ipa) {
300 tmp = 4;
301 b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2,
302 (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
303 }
304
305 tmp = 1;
306 b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2,
307 (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
308 }
309}
310
311/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BmacPhyClkFgc */
312static void b43_nphy_bmac_clock_fgc(struct b43_wldev *dev, bool force)
313{
314 u32 tmslow;
315
316 if (dev->phy.type != B43_PHYTYPE_N)
317 return;
318
319 tmslow = ssb_read32(dev->dev, SSB_TMSLOW);
320 if (force)
321 tmslow |= SSB_TMSLOW_FGC;
322 else
323 tmslow &= ~SSB_TMSLOW_FGC;
324 ssb_write32(dev->dev, SSB_TMSLOW, tmslow);
325}
326
327/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
328static void b43_nphy_reset_cca(struct b43_wldev *dev)
329{
330 u16 bbcfg;
331
332 b43_nphy_bmac_clock_fgc(dev, 1);
333 bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG);
334 b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA);
335 udelay(1);
336 b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA);
337 b43_nphy_bmac_clock_fgc(dev, 0);
338 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
339}
340
341/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */
342static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble)
343{
344 u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG);
345
346 mimocfg |= B43_NPHY_MIMOCFG_AUTO;
347 if (preamble == 1)
348 mimocfg |= B43_NPHY_MIMOCFG_GFMIX;
349 else
350 mimocfg &= ~B43_NPHY_MIMOCFG_GFMIX;
351
352 b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg);
353}
354
355/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */
356static void b43_nphy_update_txrx_chain(struct b43_wldev *dev)
357{
358 struct b43_phy_n *nphy = dev->phy.n;
359
360 bool override = false;
361 u16 chain = 0x33;
362
363 if (nphy->txrx_chain == 0) {
364 chain = 0x11;
365 override = true;
366 } else if (nphy->txrx_chain == 1) {
367 chain = 0x22;
368 override = true;
369 }
370
371 b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
372 ~(B43_NPHY_RFSEQCA_TXEN | B43_NPHY_RFSEQCA_RXEN),
373 chain);
374
375 if (override)
376 b43_phy_set(dev, B43_NPHY_RFSEQMODE,
377 B43_NPHY_RFSEQMODE_CAOVER);
378 else
379 b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
380 ~B43_NPHY_RFSEQMODE_CAOVER);
381}
382
383/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */
384static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est,
385 u16 samps, u8 time, bool wait)
386{
387 int i;
388 u16 tmp;
389
390 b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps);
391 b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time);
392 if (wait)
393 b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE);
394 else
395 b43_phy_mask(dev, B43_NPHY_IQEST_CMD, ~B43_NPHY_IQEST_CMD_MODE);
396
397 b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START);
398
399 for (i = 1000; i; i--) {
400 tmp = b43_phy_read(dev, B43_NPHY_IQEST_CMD);
401 if (!(tmp & B43_NPHY_IQEST_CMD_START)) {
402 est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) |
403 b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0);
404 est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) |
405 b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0);
406 est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) << 16) |
407 b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0);
408
409 est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) |
410 b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1);
411 est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) |
412 b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1);
413 est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) |
414 b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1);
415 return;
416 }
417 udelay(10);
418 }
419 memset(est, 0, sizeof(*est));
420}
421
422/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */
423static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
424 struct b43_phy_n_iq_comp *pcomp)
425{
426 if (write) {
427 b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0);
428 b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0);
429 b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1);
430 b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1);
431 } else {
432 pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0);
433 pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0);
434 pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1);
435 pcomp->b1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPB1);
436 }
437}
438
439/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */
440static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core)
441{
442 u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
443
444 b43_phy_write(dev, B43_NPHY_RFSEQCA, regs[0]);
445 if (core == 0) {
446 b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[1]);
447 b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
448 } else {
449 b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
450 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
451 }
452 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[3]);
453 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[4]);
454 b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, regs[5]);
455 b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, regs[6]);
456 b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, regs[7]);
457 b43_phy_write(dev, B43_NPHY_RFCTL_OVER, regs[8]);
458 b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]);
459 b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
460}
461
462/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */
463static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
464{
465 u8 rxval, txval;
466 u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
467
468 regs[0] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
469 if (core == 0) {
470 regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
471 regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
472 } else {
473 regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
474 regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
475 }
476 regs[3] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
477 regs[4] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
478 regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1);
479 regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2);
480 regs[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S1);
481 regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER);
482 regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
483 regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
484
485 b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
486 b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
487
488 b43_phy_maskset(dev, B43_NPHY_RFSEQCA, (u16)~B43_NPHY_RFSEQCA_RXDIS,
489 ((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
490 b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
491 ((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT));
492 b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN,
493 (core << B43_NPHY_RFSEQCA_RXEN_SHIFT));
494 b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXDIS,
495 (core << B43_NPHY_RFSEQCA_TXDIS_SHIFT));
496
497 if (core == 0) {
498 b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x0007);
499 b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0007);
500 } else {
501 b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x0007);
502 b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007);
503 }
504
505 b43_nphy_rf_control_intc_override(dev, 2, 0, 3);
506 b43_nphy_rf_control_override(dev, 8, 0, 3, false);
507 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
508
509 if (core == 0) {
510 rxval = 1;
511 txval = 8;
512 } else {
513 rxval = 4;
514 txval = 2;
515 }
516 b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1));
517 b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core));
518}
519
520/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
521static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
522{
523 int i;
524 s32 iq;
525 u32 ii;
526 u32 qq;
527 int iq_nbits, qq_nbits;
528 int arsh, brsh;
529 u16 tmp, a, b;
530
531 struct nphy_iq_est est;
532 struct b43_phy_n_iq_comp old;
533 struct b43_phy_n_iq_comp new = { };
534 bool error = false;
535
536 if (mask == 0)
537 return;
538
539 b43_nphy_rx_iq_coeffs(dev, false, &old);
540 b43_nphy_rx_iq_coeffs(dev, true, &new);
541 b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false);
542 new = old;
543
544 for (i = 0; i < 2; i++) {
545 if (i == 0 && (mask & 1)) {
546 iq = est.iq0_prod;
547 ii = est.i0_pwr;
548 qq = est.q0_pwr;
549 } else if (i == 1 && (mask & 2)) {
550 iq = est.iq1_prod;
551 ii = est.i1_pwr;
552 qq = est.q1_pwr;
553 } else {
554 B43_WARN_ON(1);
555 continue;
556 }
557
558 if (ii + qq < 2) {
559 error = true;
560 break;
561 }
562
563 iq_nbits = fls(abs(iq));
564 qq_nbits = fls(qq);
565
566 arsh = iq_nbits - 20;
567 if (arsh >= 0) {
568 a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
569 tmp = ii >> arsh;
570 } else {
571 a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
572 tmp = ii << -arsh;
573 }
574 if (tmp == 0) {
575 error = true;
576 break;
577 }
578 a /= tmp;
579
580 brsh = qq_nbits - 11;
581 if (brsh >= 0) {
582 b = (qq << (31 - qq_nbits));
583 tmp = ii >> brsh;
584 } else {
585 b = (qq << (31 - qq_nbits));
586 tmp = ii << -brsh;
587 }
588 if (tmp == 0) {
589 error = true;
590 break;
591 }
592 b = int_sqrt(b / tmp - a * a) - (1 << 10);
593
594 if (i == 0 && (mask & 0x1)) {
595 if (dev->phy.rev >= 3) {
596 new.a0 = a & 0x3FF;
597 new.b0 = b & 0x3FF;
598 } else {
599 new.a0 = b & 0x3FF;
600 new.b0 = a & 0x3FF;
601 }
602 } else if (i == 1 && (mask & 0x2)) {
603 if (dev->phy.rev >= 3) {
604 new.a1 = a & 0x3FF;
605 new.b1 = b & 0x3FF;
606 } else {
607 new.a1 = b & 0x3FF;
608 new.b1 = a & 0x3FF;
609 }
610 }
611 }
612
613 if (error)
614 new = old;
615
616 b43_nphy_rx_iq_coeffs(dev, true, &new);
617}
618
619/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */
620static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
621{
622 u16 array[4];
623 int i;
624
625 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x3C50);
626 for (i = 0; i < 4; i++)
627 array[i] = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
628
629 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]);
630 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]);
631 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]);
632 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]);
633}
634
635/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
636static void b43_nphy_write_clip_detection(struct b43_wldev *dev, u16 *clip_st)
637{
638 b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]);
639 b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]);
640}
641
642/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
643static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
644{
645 clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES);
646 clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
647}
648
649/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
650static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
651{
652 u16 tmp;
653
654 if (dev->dev->id.revision == 16)
655 b43_mac_suspend(dev);
656
657 tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL);
658 tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN |
659 B43_NPHY_CLASSCTL_WAITEDEN);
660 tmp &= ~mask;
661 tmp |= (val & mask);
662 b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp);
663
664 if (dev->dev->id.revision == 16)
665 b43_mac_enable(dev);
666
667 return tmp;
668}
669
670/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */
671static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
241{ 672{
242 struct b43_phy *phy = &dev->phy; 673 struct b43_phy *phy = &dev->phy;
243 unsigned int i; 674 struct b43_phy_n *nphy = phy->n;
244 675
245 b43_phy_set(dev, B43_NPHY_IQFLIP, 676 if (enable) {
246 B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2); 677 u16 clip[] = { 0xFFFF, 0xFFFF };
247 if (1 /* FIXME band is 2.4GHz */) { 678 if (nphy->deaf_count++ == 0) {
248 b43_phy_set(dev, B43_NPHY_CLASSCTL, 679 nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
249 B43_NPHY_CLASSCTL_CCKEN); 680 b43_nphy_classifier(dev, 0x7, 0);
250 } else { 681 b43_nphy_read_clip_detection(dev, nphy->clip_state);
251 b43_phy_mask(dev, B43_NPHY_CLASSCTL, 682 b43_nphy_write_clip_detection(dev, clip);
252 ~B43_NPHY_CLASSCTL_CCKEN); 683 }
253 } 684 b43_nphy_reset_cca(dev);
254 b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8); 685 } else {
255 b43_phy_write(dev, B43_NPHY_TXFRAMEDELAY, 8); 686 if (--nphy->deaf_count == 0) {
256 687 b43_nphy_classifier(dev, 0x7, nphy->classifier_state);
257 /* Fixup some tables */ 688 b43_nphy_write_clip_detection(dev, nphy->clip_state);
258 b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0xA); 689 }
259 b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0xA); 690 }
260 b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA); 691}
261 b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA); 692
262 b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0); 693/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */
263 b43_ntab_write(dev, B43_NTAB16(8, 0x18), 0); 694static void b43_nphy_stop_playback(struct b43_wldev *dev)
264 b43_ntab_write(dev, B43_NTAB16(8, 0x07), 0x7AAB); 695{
265 b43_ntab_write(dev, B43_NTAB16(8, 0x17), 0x7AAB); 696 struct b43_phy_n *nphy = dev->phy.n;
266 b43_ntab_write(dev, B43_NTAB16(8, 0x06), 0x800); 697 u16 tmp;
267 b43_ntab_write(dev, B43_NTAB16(8, 0x16), 0x800); 698
268 699 if (nphy->hang_avoid)
269 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8); 700 b43_nphy_stay_in_carrier_search(dev, 1);
270 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301); 701
271 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8); 702 tmp = b43_phy_read(dev, B43_NPHY_SAMP_STAT);
272 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301); 703 if (tmp & 0x1)
273 704 b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP);
274 //TODO set RF sequence 705 else if (tmp & 0x2)
275 706 b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, (u16)~0x8000);
276 /* Set narrowband clip threshold */ 707
277 b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 66); 708 b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004);
278 b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 66); 709
279 710 if (nphy->bb_mult_save & 0x80000000) {
280 /* Set wideband clip 2 threshold */ 711 tmp = nphy->bb_mult_save & 0xFFFF;
281 b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES, 712 b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
282 ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, 713 nphy->bb_mult_save = 0;
283 21 << B43_NPHY_C1_CLIPWBTHRES_CLIP2_SHIFT); 714 }
284 b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES, 715
285 ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, 716 if (nphy->hang_avoid)
286 21 << B43_NPHY_C2_CLIPWBTHRES_CLIP2_SHIFT); 717 b43_nphy_stay_in_carrier_search(dev, 0);
287 718}
288 /* Set Clip 2 detect */ 719
289 b43_phy_set(dev, B43_NPHY_C1_CGAINI, 720/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */
290 B43_NPHY_C1_CGAINI_CL2DETECT); 721static void b43_nphy_spur_workaround(struct b43_wldev *dev)
291 b43_phy_set(dev, B43_NPHY_C2_CGAINI, 722{
292 B43_NPHY_C2_CGAINI_CL2DETECT); 723 struct b43_phy_n *nphy = dev->phy.n;
293 724
294 if (0 /*FIXME*/) { 725 unsigned int channel;
295 /* Set dwell lengths */ 726 int tone[2] = { 57, 58 };
296 b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 43); 727 u32 noise[2] = { 0x3FF, 0x3FF };
297 b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 43); 728
298 b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 9); 729 B43_WARN_ON(dev->phy.rev < 3);
299 b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 9); 730
300 731 if (nphy->hang_avoid)
301 /* Set gain backoff */ 732 b43_nphy_stay_in_carrier_search(dev, 1);
302 b43_phy_maskset(dev, B43_NPHY_C1_CGAINI, 733
303 ~B43_NPHY_C1_CGAINI_GAINBKOFF, 734 /* FIXME: channel = radio_chanspec */
304 1 << B43_NPHY_C1_CGAINI_GAINBKOFF_SHIFT); 735
305 b43_phy_maskset(dev, B43_NPHY_C2_CGAINI, 736 if (nphy->gband_spurwar_en) {
306 ~B43_NPHY_C2_CGAINI_GAINBKOFF, 737 /* TODO: N PHY Adjust Analog Pfbw (7) */
307 1 << B43_NPHY_C2_CGAINI_GAINBKOFF_SHIFT); 738 if (channel == 11 && dev->phy.is_40mhz)
739 ; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/
740 else
741 ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
742 /* TODO: N PHY Adjust CRS Min Power (0x1E) */
743 }
744
745 if (nphy->aband_spurwar_en) {
746 if (channel == 54) {
747 tone[0] = 0x20;
748 noise[0] = 0x25F;
749 } else if (channel == 38 || channel == 102 || channel == 118) {
750 if (0 /* FIXME */) {
751 tone[0] = 0x20;
752 noise[0] = 0x21F;
753 } else {
754 tone[0] = 0;
755 noise[0] = 0;
756 }
757 } else if (channel == 134) {
758 tone[0] = 0x20;
759 noise[0] = 0x21F;
760 } else if (channel == 151) {
761 tone[0] = 0x10;
762 noise[0] = 0x23F;
763 } else if (channel == 153 || channel == 161) {
764 tone[0] = 0x30;
765 noise[0] = 0x23F;
766 } else {
767 tone[0] = 0;
768 noise[0] = 0;
769 }
770
771 if (!tone[0] && !noise[0])
772 ; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/
773 else
774 ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
775 }
776
777 if (nphy->hang_avoid)
778 b43_nphy_stay_in_carrier_search(dev, 0);
779}
780
781/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
782static void b43_nphy_gain_crtl_workarounds(struct b43_wldev *dev)
783{
784 struct b43_phy_n *nphy = dev->phy.n;
785 u8 i, j;
786 u8 code;
787
788 /* TODO: for PHY >= 3
789 s8 *lna1_gain, *lna2_gain;
790 u8 *gain_db, *gain_bits;
791 u16 *rfseq_init;
792 u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 };
793 u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 };
794 */
795
796 u8 rfseq_events[3] = { 6, 8, 7 };
797 u8 rfseq_delays[3] = { 10, 30, 1 };
798
799 if (dev->phy.rev >= 3) {
800 /* TODO */
801 } else {
802 /* Set Clip 2 detect */
803 b43_phy_set(dev, B43_NPHY_C1_CGAINI,
804 B43_NPHY_C1_CGAINI_CL2DETECT);
805 b43_phy_set(dev, B43_NPHY_C2_CGAINI,
806 B43_NPHY_C2_CGAINI_CL2DETECT);
807
808 /* Set narrowband clip threshold */
809 b43_phy_set(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84);
810 b43_phy_set(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84);
811
812 if (!dev->phy.is_40mhz) {
813 /* Set dwell lengths */
814 b43_phy_set(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B);
815 b43_phy_set(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B);
816 b43_phy_set(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009);
817 b43_phy_set(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009);
818 }
819
820 /* Set wideband clip 2 threshold */
821 b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
822 ~B43_NPHY_C1_CLIPWBTHRES_CLIP2,
823 21);
824 b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
825 ~B43_NPHY_C2_CLIPWBTHRES_CLIP2,
826 21);
827
828 if (!dev->phy.is_40mhz) {
829 b43_phy_maskset(dev, B43_NPHY_C1_CGAINI,
830 ~B43_NPHY_C1_CGAINI_GAINBKOFF, 0x1);
831 b43_phy_maskset(dev, B43_NPHY_C2_CGAINI,
832 ~B43_NPHY_C2_CGAINI_GAINBKOFF, 0x1);
833 b43_phy_maskset(dev, B43_NPHY_C1_CCK_CGAINI,
834 ~B43_NPHY_C1_CCK_CGAINI_GAINBKOFF, 0x1);
835 b43_phy_maskset(dev, B43_NPHY_C2_CCK_CGAINI,
836 ~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1);
837 }
838
839 b43_phy_set(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
840
841 if (nphy->gain_boost) {
842 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
843 dev->phy.is_40mhz)
844 code = 4;
845 else
846 code = 5;
847 } else {
848 code = dev->phy.is_40mhz ? 6 : 7;
849 }
308 850
309 /* Set HPVGA2 index */ 851 /* Set HPVGA2 index */
310 b43_phy_maskset(dev, B43_NPHY_C1_INITGAIN, 852 b43_phy_maskset(dev, B43_NPHY_C1_INITGAIN,
311 ~B43_NPHY_C1_INITGAIN_HPVGA2, 853 ~B43_NPHY_C1_INITGAIN_HPVGA2,
312 6 << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT); 854 code << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT);
313 b43_phy_maskset(dev, B43_NPHY_C2_INITGAIN, 855 b43_phy_maskset(dev, B43_NPHY_C2_INITGAIN,
314 ~B43_NPHY_C2_INITGAIN_HPVGA2, 856 ~B43_NPHY_C2_INITGAIN_HPVGA2,
315 6 << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT); 857 code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);
858
859 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
860 b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
861 (code << 8 | 0x7C));
862 b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
863 (code << 8 | 0x7C));
864
865 /* TODO: b43_nphy_adjust_lna_gain_table(dev); */
866
867 if (nphy->elna_gain_config) {
868 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808);
869 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
870 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
871 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
872 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
873
874 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0C08);
875 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
876 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
877 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
878 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
879
880 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
881 b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
882 (code << 8 | 0x74));
883 b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
884 (code << 8 | 0x74));
885 }
316 886
317 //FIXME verify that the specs really mean to use autoinc here. 887 if (dev->phy.rev == 2) {
318 for (i = 0; i < 3; i++) 888 for (i = 0; i < 4; i++) {
319 b43_ntab_write(dev, B43_NTAB16(7, 0x106) + i, 0x673); 889 b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
890 (0x0400 * i) + 0x0020);
891 for (j = 0; j < 21; j++)
892 b43_phy_write(dev,
893 B43_NPHY_TABLE_DATALO, 3 * j);
894 }
895
896 b43_nphy_set_rf_sequence(dev, 5,
897 rfseq_events, rfseq_delays, 3);
898 b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
899 (u16)~B43_NPHY_OVER_DGAIN_CCKDGECV,
900 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
901
902 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
903 b43_phy_maskset(dev, B43_PHY_N(0xC5D),
904 0xFF80, 4);
905 }
320 } 906 }
907}
321 908
322 /* Set minimum gain value */ 909/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
323 b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN, 910static void b43_nphy_workarounds(struct b43_wldev *dev)
324 ~B43_NPHY_C1_MINGAIN, 911{
325 23 << B43_NPHY_C1_MINGAIN_SHIFT); 912 struct ssb_bus *bus = dev->dev->bus;
326 b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN, 913 struct b43_phy *phy = &dev->phy;
327 ~B43_NPHY_C2_MINGAIN, 914 struct b43_phy_n *nphy = phy->n;
328 23 << B43_NPHY_C2_MINGAIN_SHIFT);
329 915
330 if (phy->rev < 2) { 916 u8 events1[7] = { 0x0, 0x1, 0x2, 0x8, 0x4, 0x5, 0x3 };
331 b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL, 917 u8 delays1[7] = { 0x8, 0x6, 0x6, 0x2, 0x4, 0x3C, 0x1 };
332 ~B43_NPHY_SCRAM_SIGCTL_SCM); 918
919 u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
920 u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
921
922 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
923 b43_nphy_classifier(dev, 1, 0);
924 else
925 b43_nphy_classifier(dev, 1, 1);
926
927 if (nphy->hang_avoid)
928 b43_nphy_stay_in_carrier_search(dev, 1);
929
930 b43_phy_set(dev, B43_NPHY_IQFLIP,
931 B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
932
933 if (dev->phy.rev >= 3) {
934 /* TODO */
935 } else {
936 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
937 nphy->band5g_pwrgain) {
938 b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
939 b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
940 } else {
941 b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8);
942 b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8);
943 }
944
945 /* TODO: convert to b43_ntab_write? */
946 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2000);
947 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x000A);
948 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2010);
949 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x000A);
950 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2002);
951 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0xCDAA);
952 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2012);
953 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0xCDAA);
954
955 if (dev->phy.rev < 2) {
956 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2008);
957 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0000);
958 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2018);
959 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0000);
960 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2007);
961 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x7AAB);
962 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2017);
963 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x7AAB);
964 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2006);
965 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0800);
966 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2016);
967 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0800);
968 }
969
970 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
971 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
972 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
973 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
974
975 if (bus->sprom.boardflags2_lo & 0x100 &&
976 bus->boardinfo.type == 0x8B) {
977 delays1[0] = 0x1;
978 delays1[5] = 0x14;
979 }
980 b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
981 b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
982
983 b43_nphy_gain_crtl_workarounds(dev);
984
985 if (dev->phy.rev < 2) {
986 if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2)
987 ; /*TODO: b43_mhf(dev, 2, 0x0010, 0x0010, 3);*/
988 } else if (dev->phy.rev == 2) {
989 b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0);
990 b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0);
991 }
992
993 if (dev->phy.rev < 2)
994 b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL,
995 ~B43_NPHY_SCRAM_SIGCTL_SCM);
996
997 /* Set phase track alpha and beta */
998 b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125);
999 b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
1000 b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
1001 b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
1002 b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
1003 b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
1004
1005 b43_phy_mask(dev, B43_NPHY_PIL_DW1,
1006 (u16)~B43_NPHY_PIL_DW_64QAM);
1007 b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
1008 b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
1009 b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
1010
1011 if (dev->phy.rev == 2)
1012 b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
1013 B43_NPHY_FINERX2_CGC_DECGC);
333 } 1014 }
334 1015
335 /* Set phase track alpha and beta */ 1016 if (nphy->hang_avoid)
336 b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125); 1017 b43_nphy_stay_in_carrier_search(dev, 0);
337 b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
338 b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
339 b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
340 b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
341 b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
342} 1018}
343 1019
344static void b43_nphy_reset_cca(struct b43_wldev *dev) 1020/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */
1021static int b43_nphy_load_samples(struct b43_wldev *dev,
1022 struct b43_c32 *samples, u16 len) {
1023 struct b43_phy_n *nphy = dev->phy.n;
1024 u16 i;
1025 u32 *data;
1026
1027 data = kzalloc(len * sizeof(u32), GFP_KERNEL);
1028 if (!data) {
1029 b43err(dev->wl, "allocation for samples loading failed\n");
1030 return -ENOMEM;
1031 }
1032 if (nphy->hang_avoid)
1033 b43_nphy_stay_in_carrier_search(dev, 1);
1034
1035 for (i = 0; i < len; i++) {
1036 data[i] = (samples[i].i & 0x3FF << 10);
1037 data[i] |= samples[i].q & 0x3FF;
1038 }
1039 b43_ntab_write_bulk(dev, B43_NTAB32(17, 0), len, data);
1040
1041 kfree(data);
1042 if (nphy->hang_avoid)
1043 b43_nphy_stay_in_carrier_search(dev, 0);
1044 return 0;
1045}
1046
1047/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GenLoadSamples */
1048static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
1049 bool test)
345{ 1050{
346 u16 bbcfg; 1051 int i;
1052 u16 bw, len, rot, angle;
1053 struct b43_c32 *samples;
347 1054
348 ssb_write32(dev->dev, SSB_TMSLOW, 1055
349 ssb_read32(dev->dev, SSB_TMSLOW) | SSB_TMSLOW_FGC); 1056 bw = (dev->phy.is_40mhz) ? 40 : 20;
350 bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG); 1057 len = bw << 3;
351 b43_phy_set(dev, B43_NPHY_BBCFG, B43_NPHY_BBCFG_RSTCCA); 1058
352 b43_phy_write(dev, B43_NPHY_BBCFG, 1059 if (test) {
353 bbcfg & ~B43_NPHY_BBCFG_RSTCCA); 1060 if (b43_phy_read(dev, B43_NPHY_BBCFG) & B43_NPHY_BBCFG_RSTRX)
354 ssb_write32(dev->dev, SSB_TMSLOW, 1061 bw = 82;
355 ssb_read32(dev->dev, SSB_TMSLOW) & ~SSB_TMSLOW_FGC); 1062 else
1063 bw = 80;
1064
1065 if (dev->phy.is_40mhz)
1066 bw <<= 1;
1067
1068 len = bw << 1;
1069 }
1070
1071 samples = kzalloc(len * sizeof(struct b43_c32), GFP_KERNEL);
1072 if (!samples) {
1073 b43err(dev->wl, "allocation for samples generation failed\n");
1074 return 0;
1075 }
1076 rot = (((freq * 36) / bw) << 16) / 100;
1077 angle = 0;
1078
1079 for (i = 0; i < len; i++) {
1080 samples[i] = b43_cordic(angle);
1081 angle += rot;
1082 samples[i].q = CORDIC_CONVERT(samples[i].q * max);
1083 samples[i].i = CORDIC_CONVERT(samples[i].i * max);
1084 }
1085
1086 i = b43_nphy_load_samples(dev, samples, len);
1087 kfree(samples);
1088 return (i < 0) ? 0 : len;
356} 1089}
357 1090
358enum b43_nphy_rf_sequence { 1091/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RunSamples */
359 B43_RFSEQ_RX2TX, 1092static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
360 B43_RFSEQ_TX2RX, 1093 u16 wait, bool iqmode, bool dac_test)
361 B43_RFSEQ_RESET2RX, 1094{
362 B43_RFSEQ_UPDATE_GAINH, 1095 struct b43_phy_n *nphy = dev->phy.n;
363 B43_RFSEQ_UPDATE_GAINL, 1096 int i;
364 B43_RFSEQ_UPDATE_GAINU, 1097 u16 seq_mode;
365}; 1098 u32 tmp;
1099
1100 if (nphy->hang_avoid)
1101 b43_nphy_stay_in_carrier_search(dev, true);
1102
1103 if ((nphy->bb_mult_save & 0x80000000) == 0) {
1104 tmp = b43_ntab_read(dev, B43_NTAB16(15, 87));
1105 nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000;
1106 }
1107
1108 if (!dev->phy.is_40mhz)
1109 tmp = 0x6464;
1110 else
1111 tmp = 0x4747;
1112 b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
1113
1114 if (nphy->hang_avoid)
1115 b43_nphy_stay_in_carrier_search(dev, false);
1116
1117 b43_phy_write(dev, B43_NPHY_SAMP_DEPCNT, (samps - 1));
1118
1119 if (loops != 0xFFFF)
1120 b43_phy_write(dev, B43_NPHY_SAMP_LOOPCNT, (loops - 1));
1121 else
1122 b43_phy_write(dev, B43_NPHY_SAMP_LOOPCNT, loops);
1123
1124 b43_phy_write(dev, B43_NPHY_SAMP_WAITCNT, wait);
1125
1126 seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE);
1127
1128 b43_phy_set(dev, B43_NPHY_RFSEQMODE, B43_NPHY_RFSEQMODE_CAOVER);
1129 if (iqmode) {
1130 b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF);
1131 b43_phy_set(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8000);
1132 } else {
1133 if (dac_test)
1134 b43_phy_write(dev, B43_NPHY_SAMP_CMD, 5);
1135 else
1136 b43_phy_write(dev, B43_NPHY_SAMP_CMD, 1);
1137 }
1138 for (i = 0; i < 100; i++) {
1139 if (b43_phy_read(dev, B43_NPHY_RFSEQST) & 1) {
1140 i = 0;
1141 break;
1142 }
1143 udelay(10);
1144 }
1145 if (i)
1146 b43err(dev->wl, "run samples timeout\n");
1147
1148 b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
1149}
1150
1151/*
1152 * Transmits a known value for LO calibration
1153 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone
1154 */
1155static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val,
1156 bool iqmode, bool dac_test)
1157{
1158 u16 samp = b43_nphy_gen_load_samples(dev, freq, max_val, dac_test);
1159 if (samp == 0)
1160 return -1;
1161 b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test);
1162 return 0;
1163}
1164
1165/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */
1166static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev)
1167{
1168 struct b43_phy_n *nphy = dev->phy.n;
1169 int i, j;
1170 u32 tmp;
1171 u32 cur_real, cur_imag, real_part, imag_part;
1172
1173 u16 buffer[7];
1174
1175 if (nphy->hang_avoid)
1176 b43_nphy_stay_in_carrier_search(dev, true);
1177
1178 b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
1179
1180 for (i = 0; i < 2; i++) {
1181 tmp = ((buffer[i * 2] & 0x3FF) << 10) |
1182 (buffer[i * 2 + 1] & 0x3FF);
1183 b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
1184 (((i + 26) << 10) | 320));
1185 for (j = 0; j < 128; j++) {
1186 b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
1187 ((tmp >> 16) & 0xFFFF));
1188 b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
1189 (tmp & 0xFFFF));
1190 }
1191 }
1192
1193 for (i = 0; i < 2; i++) {
1194 tmp = buffer[5 + i];
1195 real_part = (tmp >> 8) & 0xFF;
1196 imag_part = (tmp & 0xFF);
1197 b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
1198 (((i + 26) << 10) | 448));
1199
1200 if (dev->phy.rev >= 3) {
1201 cur_real = real_part;
1202 cur_imag = imag_part;
1203 tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF);
1204 }
1205
1206 for (j = 0; j < 128; j++) {
1207 if (dev->phy.rev < 3) {
1208 cur_real = (real_part * loscale[j] + 128) >> 8;
1209 cur_imag = (imag_part * loscale[j] + 128) >> 8;
1210 tmp = ((cur_real & 0xFF) << 8) |
1211 (cur_imag & 0xFF);
1212 }
1213 b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
1214 ((tmp >> 16) & 0xFFFF));
1215 b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
1216 (tmp & 0xFFFF));
1217 }
1218 }
1219
1220 if (dev->phy.rev >= 3) {
1221 b43_shm_write16(dev, B43_SHM_SHARED,
1222 B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF);
1223 b43_shm_write16(dev, B43_SHM_SHARED,
1224 B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF);
1225 }
1226
1227 if (nphy->hang_avoid)
1228 b43_nphy_stay_in_carrier_search(dev, false);
1229}
1230
1231/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */
1232static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
1233 u8 *events, u8 *delays, u8 length)
1234{
1235 struct b43_phy_n *nphy = dev->phy.n;
1236 u8 i;
1237 u8 end = (dev->phy.rev >= 3) ? 0x1F : 0x0F;
1238 u16 offset1 = cmd << 4;
1239 u16 offset2 = offset1 + 0x80;
366 1240
1241 if (nphy->hang_avoid)
1242 b43_nphy_stay_in_carrier_search(dev, true);
1243
1244 b43_ntab_write_bulk(dev, B43_NTAB8(7, offset1), length, events);
1245 b43_ntab_write_bulk(dev, B43_NTAB8(7, offset2), length, delays);
1246
1247 for (i = length; i < 16; i++) {
1248 b43_ntab_write(dev, B43_NTAB8(7, offset1 + i), end);
1249 b43_ntab_write(dev, B43_NTAB8(7, offset2 + i), 1);
1250 }
1251
1252 if (nphy->hang_avoid)
1253 b43_nphy_stay_in_carrier_search(dev, false);
1254}
1255
1256/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */
367static void b43_nphy_force_rf_sequence(struct b43_wldev *dev, 1257static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
368 enum b43_nphy_rf_sequence seq) 1258 enum b43_nphy_rf_sequence seq)
369{ 1259{
@@ -376,6 +1266,7 @@ static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
376 [B43_RFSEQ_UPDATE_GAINU] = B43_NPHY_RFSEQTR_UPGU, 1266 [B43_RFSEQ_UPDATE_GAINU] = B43_NPHY_RFSEQTR_UPGU,
377 }; 1267 };
378 int i; 1268 int i;
1269 u16 seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE);
379 1270
380 B43_WARN_ON(seq >= ARRAY_SIZE(trigger)); 1271 B43_WARN_ON(seq >= ARRAY_SIZE(trigger));
381 1272
@@ -389,8 +1280,181 @@ static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
389 } 1280 }
390 b43err(dev->wl, "RF sequence status timeout\n"); 1281 b43err(dev->wl, "RF sequence status timeout\n");
391ok: 1282ok:
392 b43_phy_mask(dev, B43_NPHY_RFSEQMODE, 1283 b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
393 ~(B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER)); 1284}
1285
1286/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
1287static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
1288 u16 value, u8 core, bool off)
1289{
1290 int i;
1291 u8 index = fls(field);
1292 u8 addr, en_addr, val_addr;
1293 /* we expect only one bit set */
1294 B43_WARN_ON(field & (~(1 << (index - 1))));
1295
1296 if (dev->phy.rev >= 3) {
1297 const struct nphy_rf_control_override_rev3 *rf_ctrl;
1298 for (i = 0; i < 2; i++) {
1299 if (index == 0 || index == 16) {
1300 b43err(dev->wl,
1301 "Unsupported RF Ctrl Override call\n");
1302 return;
1303 }
1304
1305 rf_ctrl = &tbl_rf_control_override_rev3[index - 1];
1306 en_addr = B43_PHY_N((i == 0) ?
1307 rf_ctrl->en_addr0 : rf_ctrl->en_addr1);
1308 val_addr = B43_PHY_N((i == 0) ?
1309 rf_ctrl->val_addr0 : rf_ctrl->val_addr1);
1310
1311 if (off) {
1312 b43_phy_mask(dev, en_addr, ~(field));
1313 b43_phy_mask(dev, val_addr,
1314 ~(rf_ctrl->val_mask));
1315 } else {
1316 if (core == 0 || ((1 << core) & i) != 0) {
1317 b43_phy_set(dev, en_addr, field);
1318 b43_phy_maskset(dev, val_addr,
1319 ~(rf_ctrl->val_mask),
1320 (value << rf_ctrl->val_shift));
1321 }
1322 }
1323 }
1324 } else {
1325 const struct nphy_rf_control_override_rev2 *rf_ctrl;
1326 if (off) {
1327 b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(field));
1328 value = 0;
1329 } else {
1330 b43_phy_set(dev, B43_NPHY_RFCTL_OVER, field);
1331 }
1332
1333 for (i = 0; i < 2; i++) {
1334 if (index <= 1 || index == 16) {
1335 b43err(dev->wl,
1336 "Unsupported RF Ctrl Override call\n");
1337 return;
1338 }
1339
1340 if (index == 2 || index == 10 ||
1341 (index >= 13 && index <= 15)) {
1342 core = 1;
1343 }
1344
1345 rf_ctrl = &tbl_rf_control_override_rev2[index - 2];
1346 addr = B43_PHY_N((i == 0) ?
1347 rf_ctrl->addr0 : rf_ctrl->addr1);
1348
1349 if ((core & (1 << i)) != 0)
1350 b43_phy_maskset(dev, addr, ~(rf_ctrl->bmask),
1351 (value << rf_ctrl->shift));
1352
1353 b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
1354 b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
1355 B43_NPHY_RFCTL_CMD_START);
1356 udelay(1);
1357 b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE);
1358 }
1359 }
1360}
1361
1362/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
1363static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
1364 u16 value, u8 core)
1365{
1366 u8 i, j;
1367 u16 reg, tmp, val;
1368
1369 B43_WARN_ON(dev->phy.rev < 3);
1370 B43_WARN_ON(field > 4);
1371
1372 for (i = 0; i < 2; i++) {
1373 if ((core == 1 && i == 1) || (core == 2 && !i))
1374 continue;
1375
1376 reg = (i == 0) ?
1377 B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
1378 b43_phy_mask(dev, reg, 0xFBFF);
1379
1380 switch (field) {
1381 case 0:
1382 b43_phy_write(dev, reg, 0);
1383 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
1384 break;
1385 case 1:
1386 if (!i) {
1387 b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1,
1388 0xFC3F, (value << 6));
1389 b43_phy_maskset(dev, B43_NPHY_TXF_40CO_B1S1,
1390 0xFFFE, 1);
1391 b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
1392 B43_NPHY_RFCTL_CMD_START);
1393 for (j = 0; j < 100; j++) {
1394 if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START) {
1395 j = 0;
1396 break;
1397 }
1398 udelay(10);
1399 }
1400 if (j)
1401 b43err(dev->wl,
1402 "intc override timeout\n");
1403 b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1,
1404 0xFFFE);
1405 } else {
1406 b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC2,
1407 0xFC3F, (value << 6));
1408 b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
1409 0xFFFE, 1);
1410 b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
1411 B43_NPHY_RFCTL_CMD_RXTX);
1412 for (j = 0; j < 100; j++) {
1413 if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX) {
1414 j = 0;
1415 break;
1416 }
1417 udelay(10);
1418 }
1419 if (j)
1420 b43err(dev->wl,
1421 "intc override timeout\n");
1422 b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
1423 0xFFFE);
1424 }
1425 break;
1426 case 2:
1427 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
1428 tmp = 0x0020;
1429 val = value << 5;
1430 } else {
1431 tmp = 0x0010;
1432 val = value << 4;
1433 }
1434 b43_phy_maskset(dev, reg, ~tmp, val);
1435 break;
1436 case 3:
1437 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
1438 tmp = 0x0001;
1439 val = value;
1440 } else {
1441 tmp = 0x0004;
1442 val = value << 2;
1443 }
1444 b43_phy_maskset(dev, reg, ~tmp, val);
1445 break;
1446 case 4:
1447 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
1448 tmp = 0x0002;
1449 val = value << 1;
1450 } else {
1451 tmp = 0x0008;
1452 val = value << 3;
1453 }
1454 b43_phy_maskset(dev, reg, ~tmp, val);
1455 break;
1456 }
1457 }
394} 1458}
395 1459
396static void b43_nphy_bphy_init(struct b43_wldev *dev) 1460static void b43_nphy_bphy_init(struct b43_wldev *dev)
@@ -411,81 +1475,1680 @@ static void b43_nphy_bphy_init(struct b43_wldev *dev)
411 b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668); 1475 b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
412} 1476}
413 1477
/*
 * Program one RSSI scale/offset pair into the N-PHY RSSI multiplexer.
 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi
 *
 * @scale:  6-bit gain scale, placed in bits 13:8 of the target register
 * @offset: signed offset, clamped to [-32, 31], placed in bits 5:0
 * @core:   1 = core 1 only, 2 = core 2 only, 5 = both cores
 * @rail:   0 = I rail, 1 = Q rail (not used for TSSI types 4/5, which
 *          have fixed I/Q registers)
 * @type:   measurement source: 0 = X, 1 = Y, 2 = Z, 3 = PWRDET,
 *          4 = TSSI-I, 5 = TSSI-Q, 6 = TBD
 */
static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
					s8 offset, u8 core, u8 rail, u8 type)
{
	u16 tmp;
	bool core1or5 = (core == 1) || (core == 5);
	bool core2or5 = (core == 2) || (core == 5);

	offset = clamp_val(offset, -32, 31);
	/* 6-bit scale in the high byte, 6-bit two's-complement offset below */
	tmp = ((scale & 0x3F) << 8) | (offset & 0x3F);

	/* Each (core, rail, type) combination maps to exactly one RSSIMC
	 * register; the chain below is a flat decode of that mapping. */
	if (core1or5 && (rail == 0) && (type == 2))
		b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, tmp);
	if (core1or5 && (rail == 1) && (type == 2))
		b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, tmp);
	if (core2or5 && (rail == 0) && (type == 2))
		b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, tmp);
	if (core2or5 && (rail == 1) && (type == 2))
		b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, tmp);
	if (core1or5 && (rail == 0) && (type == 0))
		b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, tmp);
	if (core1or5 && (rail == 1) && (type == 0))
		b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, tmp);
	if (core2or5 && (rail == 0) && (type == 0))
		b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, tmp);
	if (core2or5 && (rail == 1) && (type == 0))
		b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, tmp);
	if (core1or5 && (rail == 0) && (type == 1))
		b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, tmp);
	if (core1or5 && (rail == 1) && (type == 1))
		b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, tmp);
	if (core2or5 && (rail == 0) && (type == 1))
		b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, tmp);
	if (core2or5 && (rail == 1) && (type == 1))
		b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, tmp);
	if (core1or5 && (rail == 0) && (type == 6))
		b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TBD, tmp);
	if (core1or5 && (rail == 1) && (type == 6))
		b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TBD, tmp);
	if (core2or5 && (rail == 0) && (type == 6))
		b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TBD, tmp);
	if (core2or5 && (rail == 1) && (type == 6))
		b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TBD, tmp);
	if (core1or5 && (rail == 0) && (type == 3))
		b43_phy_write(dev, B43_NPHY_RSSIMC_0I_PWRDET, tmp);
	if (core1or5 && (rail == 1) && (type == 3))
		b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_PWRDET, tmp);
	if (core2or5 && (rail == 0) && (type == 3))
		b43_phy_write(dev, B43_NPHY_RSSIMC_1I_PWRDET, tmp);
	if (core2or5 && (rail == 1) && (type == 3))
		b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_PWRDET, tmp);
	/* TSSI sources ignore @rail: type 4 is always I, type 5 always Q */
	if (core1or5 && (type == 4))
		b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TSSI, tmp);
	if (core2or5 && (type == 4))
		b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TSSI, tmp);
	if (core1or5 && (type == 5))
		b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TSSI, tmp);
	if (core2or5 && (type == 5))
		b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp);
}
1538
/*
 * Route the requested RSSI source to the ADCs on PHY revisions < 3.
 *
 * @code: 0 disables the override (hardware control); non-zero forces the
 *        override path on, with @code feeding the RFCTL override bits.
 * @type: measurement source as in b43_nphy_scale_offset_rssi().
 */
static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
{
	u16 val;

	/* Encode the source into the 2-bit AFE mux select */
	if (type < 3)
		val = 0;
	else if (type == 6)
		val = 1;
	else if (type == 3)
		val = 2;
	else
		val = 3;

	/* Same select for both muxes: bits 13:12 and 15:14 */
	val = (val << 12) | (val << 14);
	b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val);
	b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val);

	/* X/Y/Z sources additionally need the RFCTL RSSI output select */
	if (type < 3) {
		b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF,
				(type + 1) << 4);
		b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF,
				(type + 1) << 4);
	}

	/* TODO use some definitions */
	if (code == 0) {
		/* Release the override and let hardware drive the path */
		b43_phy_maskset(dev, B43_NPHY_AFECTL_OVER, 0xCFFF, 0);
		if (type < 3) {
			b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFEC7, 0);
			b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xEFDC, 0);
			b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFFFE, 0);
			udelay(20);
			b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xFFFE, 0);
		}
	} else {
		/* Force the override on; the udelay lets the command latch
		 * before the start bit is cleared again */
		b43_phy_maskset(dev, B43_NPHY_AFECTL_OVER, 0xCFFF,
				0x3000);
		if (type < 3) {
			b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
					0xFEC7, 0x0180);
			b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
					0xEFDC, (code << 1 | 0x1021));
			b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFFFE, 0x1);
			udelay(20);
			b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xFFFE, 0);
		}
	}
}
1587
/*
 * Route the requested RSSI source to the ADCs on PHY revisions >= 3.
 *
 * @code: 0 disables all overrides; 1 selects core 1 only, 2 selects
 *        core 2 only, any other non-zero value programs both cores
 *        (the loop skips the core excluded by code 1/2).
 * @type: measurement source as in b43_nphy_scale_offset_rssi().
 */
static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
{
	struct b43_phy_n *nphy = dev->phy.n;
	u8 i;
	u16 reg, val;

	if (code == 0) {
		/* Clear every override touched by the enable path below */
		b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, 0xFDFF);
		b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, 0xFDFF);
		b43_phy_mask(dev, B43_NPHY_AFECTL_C1, 0xFCFF);
		b43_phy_mask(dev, B43_NPHY_AFECTL_C2, 0xFCFF);
		b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S0, 0xFFDF);
		b43_phy_mask(dev, B43_NPHY_TXF_40CO_B32S1, 0xFFDF);
		b43_phy_mask(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0xFFC3);
		b43_phy_mask(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0xFFC3);
	} else {
		/* i == 0 -> core 1 registers, i == 1 -> core 2 registers */
		for (i = 0; i < 2; i++) {
			if ((code == 1 && i == 1) || (code == 2 && !i))
				continue;

			reg = (i == 0) ?
				B43_NPHY_AFECTL_OVER1 : B43_NPHY_AFECTL_OVER;
			b43_phy_maskset(dev, reg, 0xFDFF, 0x0200);

			if (type < 3) {
				/* X/Y/Z: select via the RFCTL TRSW LUT */
				reg = (i == 0) ?
					B43_NPHY_AFECTL_C1 :
					B43_NPHY_AFECTL_C2;
				b43_phy_maskset(dev, reg, 0xFCFF, 0);

				reg = (i == 0) ?
					B43_NPHY_RFCTL_LUT_TRSW_UP1 :
					B43_NPHY_RFCTL_LUT_TRSW_UP2;
				b43_phy_maskset(dev, reg, 0xFFC3, 0);

				if (type == 0)
					val = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 4 : 8;
				else if (type == 1)
					val = 16;
				else
					val = 32;
				b43_phy_set(dev, reg, val);

				reg = (i == 0) ?
					B43_NPHY_TXF_40CO_B1S0 :
					B43_NPHY_TXF_40CO_B32S1;
				b43_phy_set(dev, reg, 0x0020);
			} else {
				/* PWRDET/TSSI/TBD: select via the AFE mux */
				if (type == 6)
					val = 0x0100;
				else if (type == 3)
					val = 0x0200;
				else
					val = 0x0300;

				reg = (i == 0) ?
					B43_NPHY_AFECTL_C1 :
					B43_NPHY_AFECTL_C2;

				b43_phy_maskset(dev, reg, 0xFCFF, val);
				b43_phy_maskset(dev, reg, 0xF3FF, val << 2);

				if (type != 3 && type != 6) {
					/* TSSI additionally needs the PA
					 * driver biased; value depends on
					 * internal-PA configuration */
					enum ieee80211_band band =
						b43_current_band(dev->wl);

					if ((nphy->ipa2g_on &&
					     band == IEEE80211_BAND_2GHZ) ||
					    (nphy->ipa5g_on &&
					     band == IEEE80211_BAND_5GHZ))
						val = (band == IEEE80211_BAND_5GHZ) ? 0xC : 0xE;
					else
						val = 0x11;
					/* 0x2000/0x3000 select the per-core
					 * radio register bank */
					reg = (i == 0) ? 0x2000 : 0x3000;
					reg |= B2055_PADDRV;
					b43_radio_write16(dev, reg, val);

					reg = (i == 0) ?
						B43_NPHY_AFECTL_OVER1 :
						B43_NPHY_AFECTL_OVER;
					b43_phy_set(dev, reg, 0x0200);
				}
			}
		}
	}
}
1674
1675/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */
1676static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
1677{
1678 if (dev->phy.rev >= 3)
1679 b43_nphy_rev3_rssi_select(dev, code, type);
1680 else
1681 b43_nphy_rev2_rssi_select(dev, code, type);
1682}
1683
1684/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */
1685static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev, u8 type, u8 *buf)
1686{
1687 int i;
1688 for (i = 0; i < 2; i++) {
1689 if (type == 2) {
1690 if (i == 0) {
1691 b43_radio_maskset(dev, B2055_C1_B0NB_RSSIVCM,
1692 0xFC, buf[0]);
1693 b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5,
1694 0xFC, buf[1]);
1695 } else {
1696 b43_radio_maskset(dev, B2055_C2_B0NB_RSSIVCM,
1697 0xFC, buf[2 * i]);
1698 b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5,
1699 0xFC, buf[2 * i + 1]);
1700 }
1701 } else {
1702 if (i == 0)
1703 b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5,
1704 0xF3, buf[0] << 2);
1705 else
1706 b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5,
1707 0xF3, buf[2 * i + 1] << 2);
1708 }
1709 }
1710}
1711
/*
 * Sample the RSSI ADCs and accumulate @nsamp readings per rail.
 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi
 *
 * @type:  measurement source, routed via b43_nphy_rssi_select()
 * @buf:   receives four per-rail sums: core1-I, core1-Q, core2-I, core2-Q
 * @nsamp: number of samples to accumulate
 *
 * Returns the four sums packed one truncated byte each into a single
 * value: buf[0] in bits 31:24 down to buf[3] in bits 7:0.
 *
 * All PHY registers touched for the measurement are saved on entry and
 * restored on exit (rev >= 3 saves a wider set; rev < 2 also redirects
 * GPIO_SEL to read the samples through the GPIO out registers).
 */
static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
				u8 nsamp)
{
	int i;
	int out;
	u16 save_regs_phy[9];
	u16 s[2];

	if (dev->phy.rev >= 3) {
		save_regs_phy[0] = b43_phy_read(dev,
						B43_NPHY_RFCTL_LUT_TRSW_UP1);
		save_regs_phy[1] = b43_phy_read(dev,
						B43_NPHY_RFCTL_LUT_TRSW_UP2);
		save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
		save_regs_phy[3] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
		save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
		save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
		save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0);
		save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1);
	}

	/* Route the requested source to both cores (code 5 = both) */
	b43_nphy_rssi_select(dev, 5, type);

	if (dev->phy.rev < 2) {
		/* save_regs_phy[8] is only valid/restored on rev < 2 */
		save_regs_phy[8] = b43_phy_read(dev, B43_NPHY_GPIO_SEL);
		b43_phy_write(dev, B43_NPHY_GPIO_SEL, 5);
	}

	for (i = 0; i < 4; i++)
		buf[i] = 0;

	for (i = 0; i < nsamp; i++) {
		if (dev->phy.rev < 2) {
			s[0] = b43_phy_read(dev, B43_NPHY_GPIO_LOOUT);
			s[1] = b43_phy_read(dev, B43_NPHY_GPIO_HIOUT);
		} else {
			s[0] = b43_phy_read(dev, B43_NPHY_RSSI1);
			s[1] = b43_phy_read(dev, B43_NPHY_RSSI2);
		}

		/* Each rail is a 6-bit signed sample packed two per
		 * register; shift up then arithmetic-shift back down to
		 * sign-extend before accumulating. */
		buf[0] += ((s8)((s[0] & 0x3F) << 2)) >> 2;
		buf[1] += ((s8)(((s[0] >> 8) & 0x3F) << 2)) >> 2;
		buf[2] += ((s8)((s[1] & 0x3F) << 2)) >> 2;
		buf[3] += ((s8)(((s[1] >> 8) & 0x3F) << 2)) >> 2;
	}
	out = (buf[0] & 0xFF) << 24 | (buf[1] & 0xFF) << 16 |
		(buf[2] & 0xFF) << 8 | (buf[3] & 0xFF);

	if (dev->phy.rev < 2)
		b43_phy_write(dev, B43_NPHY_GPIO_SEL, save_regs_phy[8]);

	if (dev->phy.rev >= 3) {
		b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1,
				save_regs_phy[0]);
		b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2,
				save_regs_phy[1]);
		b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[2]);
		b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[3]);
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, save_regs_phy[4]);
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]);
		b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]);
		b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]);
	}

	return out;
}
1779
1780/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */
1781static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
1782{
1783 int i, j;
1784 u8 state[4];
1785 u8 code, val;
1786 u16 class, override;
1787 u8 regs_save_radio[2];
1788 u16 regs_save_phy[2];
1789 s8 offset[4];
1790
1791 u16 clip_state[2];
1792 u16 clip_off[2] = { 0xFFFF, 0xFFFF };
1793 s32 results_min[4] = { };
1794 u8 vcm_final[4] = { };
1795 s32 results[4][4] = { };
1796 s32 miniq[4][2] = { };
1797
1798 if (type == 2) {
1799 code = 0;
1800 val = 6;
1801 } else if (type < 2) {
1802 code = 25;
1803 val = 4;
1804 } else {
1805 B43_WARN_ON(1);
1806 return;
1807 }
1808
1809 class = b43_nphy_classifier(dev, 0, 0);
1810 b43_nphy_classifier(dev, 7, 4);
1811 b43_nphy_read_clip_detection(dev, clip_state);
1812 b43_nphy_write_clip_detection(dev, clip_off);
1813
1814 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
1815 override = 0x140;
1816 else
1817 override = 0x110;
1818
1819 regs_save_phy[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
1820 regs_save_radio[0] = b43_radio_read16(dev, B2055_C1_PD_RXTX);
1821 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, override);
1822 b43_radio_write16(dev, B2055_C1_PD_RXTX, val);
1823
1824 regs_save_phy[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
1825 regs_save_radio[1] = b43_radio_read16(dev, B2055_C2_PD_RXTX);
1826 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, override);
1827 b43_radio_write16(dev, B2055_C2_PD_RXTX, val);
1828
1829 state[0] = b43_radio_read16(dev, B2055_C1_PD_RSSIMISC) & 0x07;
1830 state[1] = b43_radio_read16(dev, B2055_C2_PD_RSSIMISC) & 0x07;
1831 b43_radio_mask(dev, B2055_C1_PD_RSSIMISC, 0xF8);
1832 b43_radio_mask(dev, B2055_C2_PD_RSSIMISC, 0xF8);
1833 state[2] = b43_radio_read16(dev, B2055_C1_SP_RSSI) & 0x07;
1834 state[3] = b43_radio_read16(dev, B2055_C2_SP_RSSI) & 0x07;
1835
1836 b43_nphy_rssi_select(dev, 5, type);
1837 b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 0, type);
1838 b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 1, type);
1839
1840 for (i = 0; i < 4; i++) {
1841 u8 tmp[4];
1842 for (j = 0; j < 4; j++)
1843 tmp[j] = i;
1844 if (type != 1)
1845 b43_nphy_set_rssi_2055_vcm(dev, type, tmp);
1846 b43_nphy_poll_rssi(dev, type, results[i], 8);
1847 if (type < 2)
1848 for (j = 0; j < 2; j++)
1849 miniq[i][j] = min(results[i][2 * j],
1850 results[i][2 * j + 1]);
1851 }
1852
1853 for (i = 0; i < 4; i++) {
1854 s32 mind = 40;
1855 u8 minvcm = 0;
1856 s32 minpoll = 249;
1857 s32 curr;
1858 for (j = 0; j < 4; j++) {
1859 if (type == 2)
1860 curr = abs(results[j][i]);
1861 else
1862 curr = abs(miniq[j][i / 2] - code * 8);
1863
1864 if (curr < mind) {
1865 mind = curr;
1866 minvcm = j;
1867 }
1868
1869 if (results[j][i] < minpoll)
1870 minpoll = results[j][i];
1871 }
1872 results_min[i] = minpoll;
1873 vcm_final[i] = minvcm;
1874 }
1875
1876 if (type != 1)
1877 b43_nphy_set_rssi_2055_vcm(dev, type, vcm_final);
1878
1879 for (i = 0; i < 4; i++) {
1880 offset[i] = (code * 8) - results[vcm_final[i]][i];
1881
1882 if (offset[i] < 0)
1883 offset[i] = -((abs(offset[i]) + 4) / 8);
1884 else
1885 offset[i] = (offset[i] + 4) / 8;
1886
1887 if (results_min[i] == 248)
1888 offset[i] = code - 32;
1889
1890 if (i % 2 == 0)
1891 b43_nphy_scale_offset_rssi(dev, 0, offset[i], 1, 0,
1892 type);
1893 else
1894 b43_nphy_scale_offset_rssi(dev, 0, offset[i], 2, 1,
1895 type);
1896 }
1897
1898 b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[0]);
1899 b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[1]);
1900
1901 switch (state[2]) {
1902 case 1:
1903 b43_nphy_rssi_select(dev, 1, 2);
1904 break;
1905 case 4:
1906 b43_nphy_rssi_select(dev, 1, 0);
1907 break;
1908 case 2:
1909 b43_nphy_rssi_select(dev, 1, 1);
1910 break;
1911 default:
1912 b43_nphy_rssi_select(dev, 1, 1);
1913 break;
1914 }
1915
1916 switch (state[3]) {
1917 case 1:
1918 b43_nphy_rssi_select(dev, 2, 2);
1919 break;
1920 case 4:
1921 b43_nphy_rssi_select(dev, 2, 0);
1922 break;
1923 default:
1924 b43_nphy_rssi_select(dev, 2, 1);
1925 break;
1926 }
1927
1928 b43_nphy_rssi_select(dev, 0, type);
1929
1930 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs_save_phy[0]);
1931 b43_radio_write16(dev, B2055_C1_PD_RXTX, regs_save_radio[0]);
1932 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs_save_phy[1]);
1933 b43_radio_write16(dev, B2055_C2_PD_RXTX, regs_save_radio[1]);
1934
1935 b43_nphy_classifier(dev, 7, class);
1936 b43_nphy_write_clip_detection(dev, clip_state);
1937}
1938
/*
 * RSSI calibration for PHY revisions >= 3.
 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3
 */
static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
{
	/* TODO: not implemented yet — rev3+ RSSI cal is a no-op for now */
}
1944
1945/*
1946 * RSSI Calibration
1947 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal
1948 */
1949static void b43_nphy_rssi_cal(struct b43_wldev *dev)
1950{
1951 if (dev->phy.rev >= 3) {
1952 b43_nphy_rev3_rssi_cal(dev);
1953 } else {
1954 b43_nphy_rev2_rssi_cal(dev, 2);
1955 b43_nphy_rev2_rssi_cal(dev, 0);
1956 b43_nphy_rev2_rssi_cal(dev, 1);
1957 }
418} 1958}
419 1959
/*
 * Restore RSSI Calibration
 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal
 *
 * Writes a previously cached RSSI calibration (radio VCM bits plus the
 * twelve Z/X/Y RSSIMC scale/offset registers) back into the hardware.
 * A no-op if no calibration has been cached for the current band yet.
 */
static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;

	u16 *rssical_radio_regs = NULL;
	u16 *rssical_phy_regs = NULL;

	/* Pick the per-band cache; bail out if it was never filled */
	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		if (!nphy->rssical_chanspec_2G)
			return;
		rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
		rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
	} else {
		if (!nphy->rssical_chanspec_5G)
			return;
		rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G;
		rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
	}

	/* TODO use some definitions */
	/* 0x602B/0x702B: per-core radio VCM registers (rev3+ numbering) */
	b43_radio_maskset(dev, 0x602B, 0xE3, rssical_radio_regs[0]);
	b43_radio_maskset(dev, 0x702B, 0xE3, rssical_radio_regs[1]);

	b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, rssical_phy_regs[0]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, rssical_phy_regs[1]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, rssical_phy_regs[2]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, rssical_phy_regs[3]);

	b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, rssical_phy_regs[4]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, rssical_phy_regs[5]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, rssical_phy_regs[6]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, rssical_phy_regs[7]);

	b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, rssical_phy_regs[8]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, rssical_phy_regs[9]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, rssical_phy_regs[10]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, rssical_phy_regs[11]);
}
2002
2003/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
2004static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
2005{
2006 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
2007 if (dev->phy.rev >= 6) {
2008 /* TODO If the chip is 47162
2009 return txpwrctrl_tx_gain_ipa_rev5 */
2010 return txpwrctrl_tx_gain_ipa_rev6;
2011 } else if (dev->phy.rev >= 5) {
2012 return txpwrctrl_tx_gain_ipa_rev5;
2013 } else {
2014 return txpwrctrl_tx_gain_ipa;
2015 }
2016 } else {
2017 return txpwrctrl_tx_gain_ipa_5g;
2018 }
2019}
2020
2021/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */
2022static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
2023{
2024 struct b43_phy_n *nphy = dev->phy.n;
2025 u16 *save = nphy->tx_rx_cal_radio_saveregs;
2026 u16 tmp;
2027 u8 offset, i;
2028
2029 if (dev->phy.rev >= 3) {
2030 for (i = 0; i < 2; i++) {
2031 tmp = (i == 0) ? 0x2000 : 0x3000;
2032 offset = i * 11;
2033
2034 save[offset + 0] = b43_radio_read16(dev, B2055_CAL_RVARCTL);
2035 save[offset + 1] = b43_radio_read16(dev, B2055_CAL_LPOCTL);
2036 save[offset + 2] = b43_radio_read16(dev, B2055_CAL_TS);
2037 save[offset + 3] = b43_radio_read16(dev, B2055_CAL_RCCALRTS);
2038 save[offset + 4] = b43_radio_read16(dev, B2055_CAL_RCALRTS);
2039 save[offset + 5] = b43_radio_read16(dev, B2055_PADDRV);
2040 save[offset + 6] = b43_radio_read16(dev, B2055_XOCTL1);
2041 save[offset + 7] = b43_radio_read16(dev, B2055_XOCTL2);
2042 save[offset + 8] = b43_radio_read16(dev, B2055_XOREGUL);
2043 save[offset + 9] = b43_radio_read16(dev, B2055_XOMISC);
2044 save[offset + 10] = b43_radio_read16(dev, B2055_PLL_LFC1);
2045
2046 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
2047 b43_radio_write16(dev, tmp | B2055_CAL_RVARCTL, 0x0A);
2048 b43_radio_write16(dev, tmp | B2055_CAL_LPOCTL, 0x40);
2049 b43_radio_write16(dev, tmp | B2055_CAL_TS, 0x55);
2050 b43_radio_write16(dev, tmp | B2055_CAL_RCCALRTS, 0);
2051 b43_radio_write16(dev, tmp | B2055_CAL_RCALRTS, 0);
2052 if (nphy->ipa5g_on) {
2053 b43_radio_write16(dev, tmp | B2055_PADDRV, 4);
2054 b43_radio_write16(dev, tmp | B2055_XOCTL1, 1);
2055 } else {
2056 b43_radio_write16(dev, tmp | B2055_PADDRV, 0);
2057 b43_radio_write16(dev, tmp | B2055_XOCTL1, 0x2F);
2058 }
2059 b43_radio_write16(dev, tmp | B2055_XOCTL2, 0);
2060 } else {
2061 b43_radio_write16(dev, tmp | B2055_CAL_RVARCTL, 0x06);
2062 b43_radio_write16(dev, tmp | B2055_CAL_LPOCTL, 0x40);
2063 b43_radio_write16(dev, tmp | B2055_CAL_TS, 0x55);
2064 b43_radio_write16(dev, tmp | B2055_CAL_RCCALRTS, 0);
2065 b43_radio_write16(dev, tmp | B2055_CAL_RCALRTS, 0);
2066 b43_radio_write16(dev, tmp | B2055_XOCTL1, 0);
2067 if (nphy->ipa2g_on) {
2068 b43_radio_write16(dev, tmp | B2055_PADDRV, 6);
2069 b43_radio_write16(dev, tmp | B2055_XOCTL2,
2070 (dev->phy.rev < 5) ? 0x11 : 0x01);
2071 } else {
2072 b43_radio_write16(dev, tmp | B2055_PADDRV, 0);
2073 b43_radio_write16(dev, tmp | B2055_XOCTL2, 0);
2074 }
2075 }
2076 b43_radio_write16(dev, tmp | B2055_XOREGUL, 0);
2077 b43_radio_write16(dev, tmp | B2055_XOMISC, 0);
2078 b43_radio_write16(dev, tmp | B2055_PLL_LFC1, 0);
2079 }
2080 } else {
2081 save[0] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL1);
2082 b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL1, 0x29);
2083
2084 save[1] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL2);
2085 b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL2, 0x54);
2086
2087 save[2] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL1);
2088 b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL1, 0x29);
2089
2090 save[3] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL2);
2091 b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL2, 0x54);
2092
2093 save[3] = b43_radio_read16(dev, B2055_C1_PWRDET_RXTX);
2094 save[4] = b43_radio_read16(dev, B2055_C2_PWRDET_RXTX);
2095
2096 if (!(b43_phy_read(dev, B43_NPHY_BANDCTL) &
2097 B43_NPHY_BANDCTL_5GHZ)) {
2098 b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x04);
2099 b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x04);
2100 } else {
2101 b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x20);
2102 b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x20);
2103 }
2104
2105 if (dev->phy.rev < 2) {
2106 b43_radio_set(dev, B2055_C1_TX_BB_MXGM, 0x20);
2107 b43_radio_set(dev, B2055_C2_TX_BB_MXGM, 0x20);
2108 } else {
2109 b43_radio_mask(dev, B2055_C1_TX_BB_MXGM, ~0x20);
2110 b43_radio_mask(dev, B2055_C2_TX_BB_MXGM, ~0x20);
2111 }
2112 }
2113}
2114
/*
 * Compute the IQ-calibration gain parameters for one core.
 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams
 *
 * @core:   core index (0 or 1) used to pick the per-core target gains
 * @target: gain settings the calibration should run at
 * @params: filled with the decomposed gains, the packed cal_gain word
 *          and the ncorr correlation values
 *
 * Rev >= 3 packs the target gains directly (4 bits per field); older
 * revisions look the packed gain up in tbl_iqcal_gainparams and take all
 * values from the matching table row.
 */
static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
					struct nphy_txgains target,
					struct nphy_iqcal_params *params)
{
	int i, j, indx;
	u16 gain;

	if (dev->phy.rev >= 3) {
		params->txgm = target.txgm[core];
		params->pga = target.pga[core];
		params->pad = target.pad[core];
		params->ipa = target.ipa[core];
		/* cal_gain layout: txgm[15:12] pga[11:8] pad[7:4] ipa[3:0] */
		params->cal_gain = (params->txgm << 12) | (params->pga << 8) |
			(params->pad << 4) | (params->ipa);
		for (j = 0; j < 5; j++)
			params->ncorr[j] = 0x79;
	} else {
		/* Key for the table lookup: pad | pga << 4 | txgm << 8 */
		gain = (target.pad[core]) | (target.pga[core] << 4) |
			(target.txgm[core] << 8);

		indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ?
			1 : 0;
		for (i = 0; i < 9; i++)
			if (tbl_iqcal_gainparams[indx][i][0] == gain)
				break;
		/* Fall back to the last row if no exact match was found */
		i = min(i, 8);

		params->txgm = tbl_iqcal_gainparams[indx][i][1];
		params->pga = tbl_iqcal_gainparams[indx][i][2];
		params->pad = tbl_iqcal_gainparams[indx][i][3];
		params->cal_gain = (params->txgm << 7) | (params->pga << 4) |
					(params->pad << 2);
		for (j = 0; j < 4; j++)
			params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j];
	}
}
2152
2153/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */
2154static void b43_nphy_update_tx_cal_ladder(struct b43_wldev *dev, u16 core)
2155{
2156 struct b43_phy_n *nphy = dev->phy.n;
2157 int i;
2158 u16 scale, entry;
2159
2160 u16 tmp = nphy->txcal_bbmult;
2161 if (core == 0)
2162 tmp >>= 8;
2163 tmp &= 0xff;
2164
2165 for (i = 0; i < 18; i++) {
2166 scale = (ladder_lo[i].percent * tmp) / 100;
2167 entry = ((scale & 0xFF) << 8) | ladder_lo[i].g_env;
2168 b43_ntab_write(dev, B43_NTAB16(15, i), entry);
2169
2170 scale = (ladder_iq[i].percent * tmp) / 100;
2171 entry = ((scale & 0xFF) << 8) | ladder_iq[i].g_env;
2172 b43_ntab_write(dev, B43_NTAB16(15, i + 32), entry);
2173 }
2174}
2175
2176/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ExtPaSetTxDigiFilts */
2177static void b43_nphy_ext_pa_set_tx_dig_filters(struct b43_wldev *dev)
2178{
2179 int i;
2180 for (i = 0; i < 15; i++)
2181 b43_phy_write(dev, B43_PHY_N(0x2C5 + i),
2182 tbl_tx_filter_coef_rev4[2][i]);
2183}
2184
2185/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IpaSetTxDigiFilts */
2186static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev)
2187{
2188 int i, j;
2189 /* B43_NPHY_TXF_20CO_S0A1, B43_NPHY_TXF_40CO_S0A1, unknown */
2190 u16 offset[] = { 0x186, 0x195, 0x2C5 };
2191
2192 for (i = 0; i < 3; i++)
2193 for (j = 0; j < 15; j++)
2194 b43_phy_write(dev, B43_PHY_N(offset[i] + j),
2195 tbl_tx_filter_coef_rev4[i][j]);
2196
2197 if (dev->phy.is_40mhz) {
2198 for (j = 0; j < 15; j++)
2199 b43_phy_write(dev, B43_PHY_N(offset[0] + j),
2200 tbl_tx_filter_coef_rev4[3][j]);
2201 } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
2202 for (j = 0; j < 15; j++)
2203 b43_phy_write(dev, B43_PHY_N(offset[0] + j),
2204 tbl_tx_filter_coef_rev4[5][j]);
2205 }
2206
2207 if (dev->phy.channel == 14)
2208 for (j = 0; j < 15; j++)
2209 b43_phy_write(dev, B43_PHY_N(offset[0] + j),
2210 tbl_tx_filter_coef_rev4[6][j]);
2211}
2212
/*
 * Read back the currently effective TX gain settings for both cores.
 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetTxGain
 *
 * With TX power control disabled the gains are read directly from
 * N-table 7; with power control enabled the per-core table index is
 * taken from the TXPCTL status registers and decoded through the
 * revision/band-appropriate gain table.  Bit layouts differ between
 * rev >= 3 and older revisions.
 */
static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;

	u16 curr_gain[2];
	struct nphy_txgains target;
	const u32 *table = NULL;

	if (nphy->txpwrctrl == 0) {
		int i;

		/* Table read can hang the PHY; wrap in carrier search */
		if (nphy->hang_avoid)
			b43_nphy_stay_in_carrier_search(dev, true);
		b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, curr_gain);
		if (nphy->hang_avoid)
			b43_nphy_stay_in_carrier_search(dev, false);

		for (i = 0; i < 2; ++i) {
			if (dev->phy.rev >= 3) {
				target.ipa[i] = curr_gain[i] & 0x000F;
				target.pad[i] = (curr_gain[i] & 0x00F0) >> 4;
				target.pga[i] = (curr_gain[i] & 0x0F00) >> 8;
				target.txgm[i] = (curr_gain[i] & 0x7000) >> 12;
			} else {
				target.ipa[i] = curr_gain[i] & 0x0003;
				target.pad[i] = (curr_gain[i] & 0x000C) >> 2;
				target.pga[i] = (curr_gain[i] & 0x0070) >> 4;
				target.txgm[i] = (curr_gain[i] & 0x0380) >> 7;
			}
		}
	} else {
		int i;
		u16 index[2];
		index[0] = (b43_phy_read(dev, B43_NPHY_C1_TXPCTL_STAT) &
			B43_NPHY_TXPCTL_STAT_BIDX) >>
			B43_NPHY_TXPCTL_STAT_BIDX_SHIFT;
		index[1] = (b43_phy_read(dev, B43_NPHY_C2_TXPCTL_STAT) &
			B43_NPHY_TXPCTL_STAT_BIDX) >>
			B43_NPHY_TXPCTL_STAT_BIDX_SHIFT;

		for (i = 0; i < 2; ++i) {
			if (dev->phy.rev >= 3) {
				enum ieee80211_band band =
					b43_current_band(dev->wl);

				/* Internal PA has its own gain tables */
				if ((nphy->ipa2g_on &&
				     band == IEEE80211_BAND_2GHZ) ||
				    (nphy->ipa5g_on &&
				     band == IEEE80211_BAND_5GHZ)) {
					table = b43_nphy_get_ipa_gain_table(dev);
				} else {
					if (band == IEEE80211_BAND_5GHZ) {
						if (dev->phy.rev == 3)
							table = b43_ntab_tx_gain_rev3_5ghz;
						else if (dev->phy.rev == 4)
							table = b43_ntab_tx_gain_rev4_5ghz;
						else
							table = b43_ntab_tx_gain_rev5plus_5ghz;
					} else {
						table = b43_ntab_tx_gain_rev3plus_2ghz;
					}
				}

				target.ipa[i] = (table[index[i]] >> 16) & 0xF;
				target.pad[i] = (table[index[i]] >> 20) & 0xF;
				target.pga[i] = (table[index[i]] >> 24) & 0xF;
				target.txgm[i] = (table[index[i]] >> 28) & 0xF;
			} else {
				table = b43_ntab_tx_gain_rev0_1_2;

				target.ipa[i] = (table[index[i]] >> 16) & 0x3;
				target.pad[i] = (table[index[i]] >> 18) & 0x3;
				target.pga[i] = (table[index[i]] >> 20) & 0x7;
				target.txgm[i] = (table[index[i]] >> 23) & 0x7;
			}
		}
	}

	return target;
}
2294
/*
 * Undo b43_nphy_tx_cal_phy_setup(): restore the PHY registers and
 * N-table entries saved in tx_rx_cal_phy_saveregs.
 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhyCleanup
 *
 * The regs[] slot layout must stay in sync with the setup routine;
 * rev >= 3 saved 11 entries, older revisions 7.
 */
static void b43_nphy_tx_cal_phy_cleanup(struct b43_wldev *dev)
{
	u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;

	if (dev->phy.rev >= 3) {
		b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[0]);
		b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[3]);
		b43_phy_write(dev, B43_NPHY_BBCFG, regs[4]);
		b43_ntab_write(dev, B43_NTAB16(8, 3), regs[5]);
		b43_ntab_write(dev, B43_NTAB16(8, 19), regs[6]);
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[7]);
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[8]);
		b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]);
		b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
		/* Setup cleared the RX reset bit; reset CCA to recover */
		b43_nphy_reset_cca(dev);
	} else {
		/* Only the low 12 bits were overridden by setup */
		b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, regs[0]);
		b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, regs[1]);
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
		b43_ntab_write(dev, B43_NTAB16(8, 2), regs[3]);
		b43_ntab_write(dev, B43_NTAB16(8, 18), regs[4]);
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[5]);
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[6]);
	}
}
2323
/*
 * Prepare the PHY for TX IQ/LO calibration, saving every modified
 * register/table entry into tx_rx_cal_phy_saveregs so that
 * b43_nphy_tx_cal_phy_cleanup() can restore it.
 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhySetup
 *
 * The regs[] slot layout must stay in sync with the cleanup routine.
 */
static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
{
	u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
	u16 tmp;

	regs[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
	regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
	if (dev->phy.rev >= 3) {
		b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0xF0FF, 0x0A00);
		b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0xF0FF, 0x0A00);

		tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
		regs[2] = tmp;
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, tmp | 0x0600);

		tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
		regs[3] = tmp;
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp | 0x0600);

		/* Hold the RX path in reset during calibration */
		regs[4] = b43_phy_read(dev, B43_NPHY_BBCFG);
		b43_phy_mask(dev, B43_NPHY_BBCFG, (u16)~B43_NPHY_BBCFG_RSTRX);

		tmp = b43_ntab_read(dev, B43_NTAB16(8, 3));
		regs[5] = tmp;
		b43_ntab_write(dev, B43_NTAB16(8, 3), 0);

		tmp = b43_ntab_read(dev, B43_NTAB16(8, 19));
		regs[6] = tmp;
		b43_ntab_write(dev, B43_NTAB16(8, 19), 0);
		regs[7] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
		regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);

		/* Force the RF chains into the calibration TX/RX state */
		b43_nphy_rf_control_intc_override(dev, 2, 1, 3);
		b43_nphy_rf_control_intc_override(dev, 1, 2, 1);
		b43_nphy_rf_control_intc_override(dev, 1, 8, 2);

		/* Disable PA predistortion while calibrating */
		regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
		regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
		b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
		b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
	} else {
		b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, 0xA000);
		b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, 0xA000);
		tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
		regs[2] = tmp;
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp | 0x3000);
		tmp = b43_ntab_read(dev, B43_NTAB16(8, 2));
		regs[3] = tmp;
		tmp |= 0x2000;
		b43_ntab_write(dev, B43_NTAB16(8, 2), tmp);
		tmp = b43_ntab_read(dev, B43_NTAB16(8, 18));
		regs[4] = tmp;
		tmp |= 0x2000;
		b43_ntab_write(dev, B43_NTAB16(8, 18), tmp);
		regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
		regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
		/* Override value is band-dependent */
		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
			tmp = 0x0180;
		else
			tmp = 0x0120;
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
	}
}
2389
2390/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SaveCal */
2391static void b43_nphy_save_cal(struct b43_wldev *dev)
2392{
2393 struct b43_phy_n *nphy = dev->phy.n;
2394
2395 struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
2396 u16 *txcal_radio_regs = NULL;
2397 u8 *iqcal_chanspec;
2398 u16 *table = NULL;
2399
2400 if (nphy->hang_avoid)
2401 b43_nphy_stay_in_carrier_search(dev, 1);
2402
2403 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
2404 rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
2405 txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
2406 iqcal_chanspec = &nphy->iqcal_chanspec_2G;
2407 table = nphy->cal_cache.txcal_coeffs_2G;
2408 } else {
2409 rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_5G;
2410 txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_5G;
2411 iqcal_chanspec = &nphy->iqcal_chanspec_5G;
2412 table = nphy->cal_cache.txcal_coeffs_5G;
2413 }
2414
2415 b43_nphy_rx_iq_coeffs(dev, false, rxcal_coeffs);
2416 /* TODO use some definitions */
2417 if (dev->phy.rev >= 3) {
2418 txcal_radio_regs[0] = b43_radio_read(dev, 0x2021);
2419 txcal_radio_regs[1] = b43_radio_read(dev, 0x2022);
2420 txcal_radio_regs[2] = b43_radio_read(dev, 0x3021);
2421 txcal_radio_regs[3] = b43_radio_read(dev, 0x3022);
2422 txcal_radio_regs[4] = b43_radio_read(dev, 0x2023);
2423 txcal_radio_regs[5] = b43_radio_read(dev, 0x2024);
2424 txcal_radio_regs[6] = b43_radio_read(dev, 0x3023);
2425 txcal_radio_regs[7] = b43_radio_read(dev, 0x3024);
2426 } else {
2427 txcal_radio_regs[0] = b43_radio_read(dev, 0x8B);
2428 txcal_radio_regs[1] = b43_radio_read(dev, 0xBA);
2429 txcal_radio_regs[2] = b43_radio_read(dev, 0x8D);
2430 txcal_radio_regs[3] = b43_radio_read(dev, 0xBC);
2431 }
2432 *iqcal_chanspec = nphy->radio_chanspec;
2433 b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 8, table);
2434
2435 if (nphy->hang_avoid)
2436 b43_nphy_stay_in_carrier_search(dev, 0);
2437}
2438
2439/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreCal */
2440static void b43_nphy_restore_cal(struct b43_wldev *dev)
2441{
2442 struct b43_phy_n *nphy = dev->phy.n;
2443
2444 u16 coef[4];
2445 u16 *loft = NULL;
2446 u16 *table = NULL;
2447
2448 int i;
2449 u16 *txcal_radio_regs = NULL;
2450 struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
2451
2452 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
2453 if (nphy->iqcal_chanspec_2G == 0)
2454 return;
2455 table = nphy->cal_cache.txcal_coeffs_2G;
2456 loft = &nphy->cal_cache.txcal_coeffs_2G[5];
2457 } else {
2458 if (nphy->iqcal_chanspec_5G == 0)
2459 return;
2460 table = nphy->cal_cache.txcal_coeffs_5G;
2461 loft = &nphy->cal_cache.txcal_coeffs_5G[5];
2462 }
2463
2464 b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 4, table);
2465
2466 for (i = 0; i < 4; i++) {
2467 if (dev->phy.rev >= 3)
2468 table[i] = coef[i];
2469 else
2470 coef[i] = 0;
2471 }
2472
2473 b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4, coef);
2474 b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2, loft);
2475 b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2, loft);
2476
2477 if (dev->phy.rev < 2)
2478 b43_nphy_tx_iq_workaround(dev);
2479
2480 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
2481 txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
2482 rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
2483 } else {
2484 txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_5G;
2485 rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_5G;
2486 }
2487
2488 /* TODO use some definitions */
2489 if (dev->phy.rev >= 3) {
2490 b43_radio_write(dev, 0x2021, txcal_radio_regs[0]);
2491 b43_radio_write(dev, 0x2022, txcal_radio_regs[1]);
2492 b43_radio_write(dev, 0x3021, txcal_radio_regs[2]);
2493 b43_radio_write(dev, 0x3022, txcal_radio_regs[3]);
2494 b43_radio_write(dev, 0x2023, txcal_radio_regs[4]);
2495 b43_radio_write(dev, 0x2024, txcal_radio_regs[5]);
2496 b43_radio_write(dev, 0x3023, txcal_radio_regs[6]);
2497 b43_radio_write(dev, 0x3024, txcal_radio_regs[7]);
2498 } else {
2499 b43_radio_write(dev, 0x8B, txcal_radio_regs[0]);
2500 b43_radio_write(dev, 0xBA, txcal_radio_regs[1]);
2501 b43_radio_write(dev, 0x8D, txcal_radio_regs[2]);
2502 b43_radio_write(dev, 0xBC, txcal_radio_regs[3]);
2503 }
2504 b43_nphy_rx_iq_coeffs(dev, true, rxcal_coeffs);
2505}
2506
2507/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalTxIqlo */
2508static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
2509 struct nphy_txgains target,
2510 bool full, bool mphase)
2511{
2512 struct b43_phy_n *nphy = dev->phy.n;
2513 int i;
2514 int error = 0;
2515 int freq;
2516 bool avoid = false;
2517 u8 length;
2518 u16 tmp, core, type, count, max, numb, last, cmd;
2519 const u16 *table;
2520 bool phy6or5x;
2521
2522 u16 buffer[11];
2523 u16 diq_start = 0;
2524 u16 save[2];
2525 u16 gain[2];
2526 struct nphy_iqcal_params params[2];
2527 bool updated[2] = { };
2528
2529 b43_nphy_stay_in_carrier_search(dev, true);
2530
2531 if (dev->phy.rev >= 4) {
2532 avoid = nphy->hang_avoid;
2533 nphy->hang_avoid = 0;
2534 }
2535
2536 b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, save);
2537
2538 for (i = 0; i < 2; i++) {
2539 b43_nphy_iq_cal_gain_params(dev, i, target, &params[i]);
2540 gain[i] = params[i].cal_gain;
2541 }
2542
2543 b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, gain);
2544
2545 b43_nphy_tx_cal_radio_setup(dev);
2546 b43_nphy_tx_cal_phy_setup(dev);
2547
2548 phy6or5x = dev->phy.rev >= 6 ||
2549 (dev->phy.rev == 5 && nphy->ipa2g_on &&
2550 b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ);
2551 if (phy6or5x) {
2552 if (dev->phy.is_40mhz) {
2553 b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18,
2554 tbl_tx_iqlo_cal_loft_ladder_40);
2555 b43_ntab_write_bulk(dev, B43_NTAB16(15, 32), 18,
2556 tbl_tx_iqlo_cal_iqimb_ladder_40);
2557 } else {
2558 b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18,
2559 tbl_tx_iqlo_cal_loft_ladder_20);
2560 b43_ntab_write_bulk(dev, B43_NTAB16(15, 32), 18,
2561 tbl_tx_iqlo_cal_iqimb_ladder_20);
2562 }
2563 }
2564
2565 b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8AA9);
2566
2567 if (!dev->phy.is_40mhz)
2568 freq = 2500;
2569 else
2570 freq = 5000;
2571
2572 if (nphy->mphase_cal_phase_id > 2)
2573 b43_nphy_run_samples(dev, (dev->phy.is_40mhz ? 40 : 20) * 8,
2574 0xFFFF, 0, true, false);
2575 else
2576 error = b43_nphy_tx_tone(dev, freq, 250, true, false);
2577
2578 if (error == 0) {
2579 if (nphy->mphase_cal_phase_id > 2) {
2580 table = nphy->mphase_txcal_bestcoeffs;
2581 length = 11;
2582 if (dev->phy.rev < 3)
2583 length -= 2;
2584 } else {
2585 if (!full && nphy->txiqlocal_coeffsvalid) {
2586 table = nphy->txiqlocal_bestc;
2587 length = 11;
2588 if (dev->phy.rev < 3)
2589 length -= 2;
2590 } else {
2591 full = true;
2592 if (dev->phy.rev >= 3) {
2593 table = tbl_tx_iqlo_cal_startcoefs_nphyrev3;
2594 length = B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3;
2595 } else {
2596 table = tbl_tx_iqlo_cal_startcoefs;
2597 length = B43_NTAB_TX_IQLO_CAL_STARTCOEFS;
2598 }
2599 }
2600 }
2601
2602 b43_ntab_write_bulk(dev, B43_NTAB16(15, 64), length, table);
2603
2604 if (full) {
2605 if (dev->phy.rev >= 3)
2606 max = B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL_REV3;
2607 else
2608 max = B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL;
2609 } else {
2610 if (dev->phy.rev >= 3)
2611 max = B43_NTAB_TX_IQLO_CAL_CMDS_RECAL_REV3;
2612 else
2613 max = B43_NTAB_TX_IQLO_CAL_CMDS_RECAL;
2614 }
2615
2616 if (mphase) {
2617 count = nphy->mphase_txcal_cmdidx;
2618 numb = min(max,
2619 (u16)(count + nphy->mphase_txcal_numcmds));
2620 } else {
2621 count = 0;
2622 numb = max;
2623 }
2624
2625 for (; count < numb; count++) {
2626 if (full) {
2627 if (dev->phy.rev >= 3)
2628 cmd = tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[count];
2629 else
2630 cmd = tbl_tx_iqlo_cal_cmds_fullcal[count];
2631 } else {
2632 if (dev->phy.rev >= 3)
2633 cmd = tbl_tx_iqlo_cal_cmds_recal_nphyrev3[count];
2634 else
2635 cmd = tbl_tx_iqlo_cal_cmds_recal[count];
2636 }
2637
2638 core = (cmd & 0x3000) >> 12;
2639 type = (cmd & 0x0F00) >> 8;
2640
2641 if (phy6or5x && updated[core] == 0) {
2642 b43_nphy_update_tx_cal_ladder(dev, core);
2643 updated[core] = 1;
2644 }
2645
2646 tmp = (params[core].ncorr[type] << 8) | 0x66;
2647 b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDNNUM, tmp);
2648
2649 if (type == 1 || type == 3 || type == 4) {
2650 buffer[0] = b43_ntab_read(dev,
2651 B43_NTAB16(15, 69 + core));
2652 diq_start = buffer[0];
2653 buffer[0] = 0;
2654 b43_ntab_write(dev, B43_NTAB16(15, 69 + core),
2655 0);
2656 }
2657
2658 b43_phy_write(dev, B43_NPHY_IQLOCAL_CMD, cmd);
2659 for (i = 0; i < 2000; i++) {
2660 tmp = b43_phy_read(dev, B43_NPHY_IQLOCAL_CMD);
2661 if (tmp & 0xC000)
2662 break;
2663 udelay(10);
2664 }
2665
2666 b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
2667 buffer);
2668 b43_ntab_write_bulk(dev, B43_NTAB16(15, 64), length,
2669 buffer);
2670
2671 if (type == 1 || type == 3 || type == 4)
2672 buffer[0] = diq_start;
2673 }
2674
2675 if (mphase)
2676 nphy->mphase_txcal_cmdidx = (numb >= max) ? 0 : numb;
2677
2678 last = (dev->phy.rev < 3) ? 6 : 7;
2679
2680 if (!mphase || nphy->mphase_cal_phase_id == last) {
2681 b43_ntab_write_bulk(dev, B43_NTAB16(15, 96), 4, buffer);
2682 b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 4, buffer);
2683 if (dev->phy.rev < 3) {
2684 buffer[0] = 0;
2685 buffer[1] = 0;
2686 buffer[2] = 0;
2687 buffer[3] = 0;
2688 }
2689 b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4,
2690 buffer);
2691 b43_ntab_write_bulk(dev, B43_NTAB16(15, 101), 2,
2692 buffer);
2693 b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2,
2694 buffer);
2695 b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2,
2696 buffer);
2697 length = 11;
2698 if (dev->phy.rev < 3)
2699 length -= 2;
2700 b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
2701 nphy->txiqlocal_bestc);
2702 nphy->txiqlocal_coeffsvalid = true;
2703 /* TODO: Set nphy->txiqlocal_chanspec to
2704 the current channel */
2705 } else {
2706 length = 11;
2707 if (dev->phy.rev < 3)
2708 length -= 2;
2709 b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
2710 nphy->mphase_txcal_bestcoeffs);
2711 }
2712
2713 b43_nphy_stop_playback(dev);
2714 b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0);
2715 }
2716
2717 b43_nphy_tx_cal_phy_cleanup(dev);
2718 b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, save);
2719
2720 if (dev->phy.rev < 2 && (!mphase || nphy->mphase_cal_phase_id == last))
2721 b43_nphy_tx_iq_workaround(dev);
2722
2723 if (dev->phy.rev >= 4)
2724 nphy->hang_avoid = avoid;
2725
2726 b43_nphy_stay_in_carrier_search(dev, false);
2727
2728 return error;
2729}
2730
2731/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ReapplyTxCalCoeffs */
2732static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev)
2733{
2734 struct b43_phy_n *nphy = dev->phy.n;
2735 u8 i;
2736 u16 buffer[7];
2737 bool equal = true;
2738
2739 if (!nphy->txiqlocal_coeffsvalid || 1 /* FIXME */)
2740 return;
2741
2742 b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
2743 for (i = 0; i < 4; i++) {
2744 if (buffer[i] != nphy->txiqlocal_bestc[i]) {
2745 equal = false;
2746 break;
2747 }
2748 }
2749
2750 if (!equal) {
2751 b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 4,
2752 nphy->txiqlocal_bestc);
2753 for (i = 0; i < 4; i++)
2754 buffer[i] = 0;
2755 b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4,
2756 buffer);
2757 b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2,
2758 &nphy->txiqlocal_bestc[5]);
2759 b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2,
2760 &nphy->txiqlocal_bestc[5]);
2761 }
2762}
2763
2764/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIqRev2 */
2765static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
2766 struct nphy_txgains target, u8 type, bool debug)
2767{
2768 struct b43_phy_n *nphy = dev->phy.n;
2769 int i, j, index;
2770 u8 rfctl[2];
2771 u8 afectl_core;
2772 u16 tmp[6];
2773 u16 cur_hpf1, cur_hpf2, cur_lna;
2774 u32 real, imag;
2775 enum ieee80211_band band;
2776
2777 u8 use;
2778 u16 cur_hpf;
2779 u16 lna[3] = { 3, 3, 1 };
2780 u16 hpf1[3] = { 7, 2, 0 };
2781 u16 hpf2[3] = { 2, 0, 0 };
2782 u32 power[3] = { };
2783 u16 gain_save[2];
2784 u16 cal_gain[2];
2785 struct nphy_iqcal_params cal_params[2];
2786 struct nphy_iq_est est;
2787 int ret = 0;
2788 bool playtone = true;
2789 int desired = 13;
2790
2791 b43_nphy_stay_in_carrier_search(dev, 1);
2792
2793 if (dev->phy.rev < 2)
2794 b43_nphy_reapply_tx_cal_coeffs(dev);
2795 b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, gain_save);
2796 for (i = 0; i < 2; i++) {
2797 b43_nphy_iq_cal_gain_params(dev, i, target, &cal_params[i]);
2798 cal_gain[i] = cal_params[i].cal_gain;
2799 }
2800 b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, cal_gain);
2801
2802 for (i = 0; i < 2; i++) {
2803 if (i == 0) {
2804 rfctl[0] = B43_NPHY_RFCTL_INTC1;
2805 rfctl[1] = B43_NPHY_RFCTL_INTC2;
2806 afectl_core = B43_NPHY_AFECTL_C1;
2807 } else {
2808 rfctl[0] = B43_NPHY_RFCTL_INTC2;
2809 rfctl[1] = B43_NPHY_RFCTL_INTC1;
2810 afectl_core = B43_NPHY_AFECTL_C2;
2811 }
2812
2813 tmp[1] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
2814 tmp[2] = b43_phy_read(dev, afectl_core);
2815 tmp[3] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
2816 tmp[4] = b43_phy_read(dev, rfctl[0]);
2817 tmp[5] = b43_phy_read(dev, rfctl[1]);
2818
2819 b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
2820 (u16)~B43_NPHY_RFSEQCA_RXDIS,
2821 ((1 - i) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
2822 b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
2823 (1 - i));
2824 b43_phy_set(dev, afectl_core, 0x0006);
2825 b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0006);
2826
2827 band = b43_current_band(dev->wl);
2828
2829 if (nphy->rxcalparams & 0xFF000000) {
2830 if (band == IEEE80211_BAND_5GHZ)
2831 b43_phy_write(dev, rfctl[0], 0x140);
2832 else
2833 b43_phy_write(dev, rfctl[0], 0x110);
2834 } else {
2835 if (band == IEEE80211_BAND_5GHZ)
2836 b43_phy_write(dev, rfctl[0], 0x180);
2837 else
2838 b43_phy_write(dev, rfctl[0], 0x120);
2839 }
2840
2841 if (band == IEEE80211_BAND_5GHZ)
2842 b43_phy_write(dev, rfctl[1], 0x148);
2843 else
2844 b43_phy_write(dev, rfctl[1], 0x114);
2845
2846 if (nphy->rxcalparams & 0x10000) {
2847 b43_radio_maskset(dev, B2055_C1_GENSPARE2, 0xFC,
2848 (i + 1));
2849 b43_radio_maskset(dev, B2055_C2_GENSPARE2, 0xFC,
2850 (2 - i));
2851 }
2852
2853 for (j = 0; i < 4; j++) {
2854 if (j < 3) {
2855 cur_lna = lna[j];
2856 cur_hpf1 = hpf1[j];
2857 cur_hpf2 = hpf2[j];
2858 } else {
2859 if (power[1] > 10000) {
2860 use = 1;
2861 cur_hpf = cur_hpf1;
2862 index = 2;
2863 } else {
2864 if (power[0] > 10000) {
2865 use = 1;
2866 cur_hpf = cur_hpf1;
2867 index = 1;
2868 } else {
2869 index = 0;
2870 use = 2;
2871 cur_hpf = cur_hpf2;
2872 }
2873 }
2874 cur_lna = lna[index];
2875 cur_hpf1 = hpf1[index];
2876 cur_hpf2 = hpf2[index];
2877 cur_hpf += desired - hweight32(power[index]);
2878 cur_hpf = clamp_val(cur_hpf, 0, 10);
2879 if (use == 1)
2880 cur_hpf1 = cur_hpf;
2881 else
2882 cur_hpf2 = cur_hpf;
2883 }
2884
2885 tmp[0] = ((cur_hpf2 << 8) | (cur_hpf1 << 4) |
2886 (cur_lna << 2));
2887 b43_nphy_rf_control_override(dev, 0x400, tmp[0], 3,
2888 false);
2889 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
2890 b43_nphy_stop_playback(dev);
2891
2892 if (playtone) {
2893 ret = b43_nphy_tx_tone(dev, 4000,
2894 (nphy->rxcalparams & 0xFFFF),
2895 false, false);
2896 playtone = false;
2897 } else {
2898 b43_nphy_run_samples(dev, 160, 0xFFFF, 0,
2899 false, false);
2900 }
2901
2902 if (ret == 0) {
2903 if (j < 3) {
2904 b43_nphy_rx_iq_est(dev, &est, 1024, 32,
2905 false);
2906 if (i == 0) {
2907 real = est.i0_pwr;
2908 imag = est.q0_pwr;
2909 } else {
2910 real = est.i1_pwr;
2911 imag = est.q1_pwr;
2912 }
2913 power[i] = ((real + imag) / 1024) + 1;
2914 } else {
2915 b43_nphy_calc_rx_iq_comp(dev, 1 << i);
2916 }
2917 b43_nphy_stop_playback(dev);
2918 }
2919
2920 if (ret != 0)
2921 break;
2922 }
2923
2924 b43_radio_mask(dev, B2055_C1_GENSPARE2, 0xFC);
2925 b43_radio_mask(dev, B2055_C2_GENSPARE2, 0xFC);
2926 b43_phy_write(dev, rfctl[1], tmp[5]);
2927 b43_phy_write(dev, rfctl[0], tmp[4]);
2928 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp[3]);
2929 b43_phy_write(dev, afectl_core, tmp[2]);
2930 b43_phy_write(dev, B43_NPHY_RFSEQCA, tmp[1]);
2931
2932 if (ret != 0)
2933 break;
2934 }
2935
2936 b43_nphy_rf_control_override(dev, 0x400, 0, 3, true);
2937 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
2938 b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, gain_save);
2939
2940 b43_nphy_stay_in_carrier_search(dev, 0);
2941
2942 return ret;
2943}
2944
/* RX IQ calibration for N-PHY rev >= 3: not implemented yet, so it
 * unconditionally reports failure to the caller. */
static int b43_nphy_rev3_cal_rx_iq(struct b43_wldev *dev,
			struct nphy_txgains target, u8 type, bool debug)
{
	return -1;	/* TODO: stub, always fails */
}
2950
2951/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIq */
2952static int b43_nphy_cal_rx_iq(struct b43_wldev *dev,
2953 struct nphy_txgains target, u8 type, bool debug)
2954{
2955 if (dev->phy.rev >= 3)
2956 return b43_nphy_rev3_cal_rx_iq(dev, target, type, debug);
2957 else
2958 return b43_nphy_rev2_cal_rx_iq(dev, target, type, debug);
2959}
2960
2961/*
2962 * Init N-PHY
2963 * http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N
2964 */
420int b43_phy_initn(struct b43_wldev *dev) 2965int b43_phy_initn(struct b43_wldev *dev)
421{ 2966{
2967 struct ssb_bus *bus = dev->dev->bus;
422 struct b43_phy *phy = &dev->phy; 2968 struct b43_phy *phy = &dev->phy;
2969 struct b43_phy_n *nphy = phy->n;
2970 u8 tx_pwr_state;
2971 struct nphy_txgains target;
423 u16 tmp; 2972 u16 tmp;
2973 enum ieee80211_band tmp2;
2974 bool do_rssi_cal;
424 2975
425 //TODO: Spectral management 2976 u16 clip[2];
2977 bool do_cal = false;
2978
2979 if ((dev->phy.rev >= 3) &&
2980 (bus->sprom.boardflags_lo & B43_BFL_EXTLNA) &&
2981 (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) {
2982 chipco_set32(&dev->dev->bus->chipco, SSB_CHIPCO_CHIPCTL, 0x40);
2983 }
2984 nphy->deaf_count = 0;
426 b43_nphy_tables_init(dev); 2985 b43_nphy_tables_init(dev);
2986 nphy->crsminpwr_adjusted = false;
2987 nphy->noisevars_adjusted = false;
427 2988
428 /* Clear all overrides */ 2989 /* Clear all overrides */
429 b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0); 2990 if (dev->phy.rev >= 3) {
2991 b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, 0);
2992 b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0);
2993 b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, 0);
2994 b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, 0);
2995 } else {
2996 b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0);
2997 }
430 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, 0); 2998 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, 0);
431 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, 0); 2999 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, 0);
432 b43_phy_write(dev, B43_NPHY_RFCTL_INTC3, 0); 3000 if (dev->phy.rev < 6) {
433 b43_phy_write(dev, B43_NPHY_RFCTL_INTC4, 0); 3001 b43_phy_write(dev, B43_NPHY_RFCTL_INTC3, 0);
3002 b43_phy_write(dev, B43_NPHY_RFCTL_INTC4, 0);
3003 }
434 b43_phy_mask(dev, B43_NPHY_RFSEQMODE, 3004 b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
435 ~(B43_NPHY_RFSEQMODE_CAOVER | 3005 ~(B43_NPHY_RFSEQMODE_CAOVER |
436 B43_NPHY_RFSEQMODE_TROVER)); 3006 B43_NPHY_RFSEQMODE_TROVER));
3007 if (dev->phy.rev >= 3)
3008 b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, 0);
437 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, 0); 3009 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, 0);
438 3010
439 tmp = (phy->rev < 2) ? 64 : 59; 3011 if (dev->phy.rev <= 2) {
440 b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, 3012 tmp = (dev->phy.rev == 2) ? 0x3B : 0x40;
441 ~B43_NPHY_BPHY_CTL3_SCALE, 3013 b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
442 tmp << B43_NPHY_BPHY_CTL3_SCALE_SHIFT); 3014 ~B43_NPHY_BPHY_CTL3_SCALE,
443 3015 tmp << B43_NPHY_BPHY_CTL3_SCALE_SHIFT);
3016 }
444 b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_20M, 0x20); 3017 b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_20M, 0x20);
445 b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_40M, 0x20); 3018 b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_40M, 0x20);
446 3019
447 b43_phy_write(dev, B43_NPHY_TXREALFD, 184); 3020 if (bus->sprom.boardflags2_lo & 0x100 ||
448 b43_phy_write(dev, B43_NPHY_MIMO_CRSTXEXT, 200); 3021 (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE &&
449 b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 80); 3022 bus->boardinfo.type == 0x8B))
450 b43_phy_write(dev, B43_NPHY_C2_BCLIPBKOFF, 511); 3023 b43_phy_write(dev, B43_NPHY_TXREALFD, 0xA0);
3024 else
3025 b43_phy_write(dev, B43_NPHY_TXREALFD, 0xB8);
3026 b43_phy_write(dev, B43_NPHY_MIMO_CRSTXEXT, 0xC8);
3027 b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x50);
3028 b43_phy_write(dev, B43_NPHY_TXRIFS_FRDEL, 0x30);
451 3029
452 //TODO MIMO-Config 3030 b43_nphy_update_mimo_config(dev, nphy->preamble_override);
453 //TODO Update TX/RX chain 3031 b43_nphy_update_txrx_chain(dev);
454 3032
455 if (phy->rev < 2) { 3033 if (phy->rev < 2) {
456 b43_phy_write(dev, B43_NPHY_DUP40_GFBL, 0xAA8); 3034 b43_phy_write(dev, B43_NPHY_DUP40_GFBL, 0xAA8);
457 b43_phy_write(dev, B43_NPHY_DUP40_BL, 0x9A4); 3035 b43_phy_write(dev, B43_NPHY_DUP40_BL, 0x9A4);
458 } 3036 }
3037
3038 tmp2 = b43_current_band(dev->wl);
3039 if ((nphy->ipa2g_on && tmp2 == IEEE80211_BAND_2GHZ) ||
3040 (nphy->ipa5g_on && tmp2 == IEEE80211_BAND_5GHZ)) {
3041 b43_phy_set(dev, B43_NPHY_PAPD_EN0, 0x1);
3042 b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ0, 0x007F,
3043 nphy->papd_epsilon_offset[0] << 7);
3044 b43_phy_set(dev, B43_NPHY_PAPD_EN1, 0x1);
3045 b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ1, 0x007F,
3046 nphy->papd_epsilon_offset[1] << 7);
3047 b43_nphy_int_pa_set_tx_dig_filters(dev);
3048 } else if (phy->rev >= 5) {
3049 b43_nphy_ext_pa_set_tx_dig_filters(dev);
3050 }
3051
459 b43_nphy_workarounds(dev); 3052 b43_nphy_workarounds(dev);
460 b43_nphy_reset_cca(dev);
461 3053
462 ssb_write32(dev->dev, SSB_TMSLOW, 3054 /* Reset CCA, in init code it differs a little from standard way */
463 ssb_read32(dev->dev, SSB_TMSLOW) | B43_TMSLOW_MACPHYCLKEN); 3055 b43_nphy_bmac_clock_fgc(dev, 1);
3056 tmp = b43_phy_read(dev, B43_NPHY_BBCFG);
3057 b43_phy_write(dev, B43_NPHY_BBCFG, tmp | B43_NPHY_BBCFG_RSTCCA);
3058 b43_phy_write(dev, B43_NPHY_BBCFG, tmp & ~B43_NPHY_BBCFG_RSTCCA);
3059 b43_nphy_bmac_clock_fgc(dev, 0);
3060
3061 /* TODO N PHY MAC PHY Clock Set with argument 1 */
3062
3063 b43_nphy_pa_override(dev, false);
464 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX); 3064 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
465 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); 3065 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
3066 b43_nphy_pa_override(dev, true);
3067
3068 b43_nphy_classifier(dev, 0, 0);
3069 b43_nphy_read_clip_detection(dev, clip);
3070 tx_pwr_state = nphy->txpwrctrl;
3071 /* TODO N PHY TX power control with argument 0
3072 (turning off power control) */
3073 /* TODO Fix the TX Power Settings */
3074 /* TODO N PHY TX Power Control Idle TSSI */
3075 /* TODO N PHY TX Power Control Setup */
3076
3077 if (phy->rev >= 3) {
3078 /* TODO */
3079 } else {
3080 b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128,
3081 b43_ntab_tx_gain_rev0_1_2);
3082 b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128,
3083 b43_ntab_tx_gain_rev0_1_2);
3084 }
3085
3086 if (nphy->phyrxchain != 3)
3087 ;/* TODO N PHY RX Core Set State with phyrxchain as argument */
3088 if (nphy->mphase_cal_phase_id > 0)
3089 ;/* TODO PHY Periodic Calibration Multi-Phase Restart */
3090
3091 do_rssi_cal = false;
3092 if (phy->rev >= 3) {
3093 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
3094 do_rssi_cal = (nphy->rssical_chanspec_2G == 0);
3095 else
3096 do_rssi_cal = (nphy->rssical_chanspec_5G == 0);
3097
3098 if (do_rssi_cal)
3099 b43_nphy_rssi_cal(dev);
3100 else
3101 b43_nphy_restore_rssi_cal(dev);
3102 } else {
3103 b43_nphy_rssi_cal(dev);
3104 }
3105
3106 if (!((nphy->measure_hold & 0x6) != 0)) {
3107 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
3108 do_cal = (nphy->iqcal_chanspec_2G == 0);
3109 else
3110 do_cal = (nphy->iqcal_chanspec_5G == 0);
3111
3112 if (nphy->mute)
3113 do_cal = false;
3114
3115 if (do_cal) {
3116 target = b43_nphy_get_tx_gains(dev);
3117
3118 if (nphy->antsel_type == 2)
3119 ;/*TODO NPHY Superswitch Init with argument 1*/
3120 if (nphy->perical != 2) {
3121 b43_nphy_rssi_cal(dev);
3122 if (phy->rev >= 3) {
3123 nphy->cal_orig_pwr_idx[0] =
3124 nphy->txpwrindex[0].index_internal;
3125 nphy->cal_orig_pwr_idx[1] =
3126 nphy->txpwrindex[1].index_internal;
3127 /* TODO N PHY Pre Calibrate TX Gain */
3128 target = b43_nphy_get_tx_gains(dev);
3129 }
3130 }
3131 }
3132 }
3133
3134 if (!b43_nphy_cal_tx_iq_lo(dev, target, true, false)) {
3135 if (b43_nphy_cal_rx_iq(dev, target, 2, 0) == 0)
3136 b43_nphy_save_cal(dev);
3137 else if (nphy->mphase_cal_phase_id == 0)
3138 ;/* N PHY Periodic Calibration with argument 3 */
3139 } else {
3140 b43_nphy_restore_cal(dev);
3141 }
466 3142
467 b43_phy_read(dev, B43_NPHY_CLASSCTL); /* dummy read */ 3143 b43_nphy_tx_pwr_ctrl_coef_setup(dev);
468 //TODO read core1/2 clip1 thres regs 3144 /* TODO N PHY TX Power Control Enable with argument tx_pwr_state */
469 3145 b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015);
470 if (1 /* FIXME Band is 2.4GHz */) 3146 b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320);
471 b43_nphy_bphy_init(dev); 3147 if (phy->rev >= 3 && phy->rev <= 6)
472 //TODO disable TX power control 3148 b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0014);
473 //TODO Fix the TX power settings 3149 b43_nphy_tx_lp_fbw(dev);
474 //TODO Init periodic calibration with reason 3 3150 if (phy->rev >= 3)
475 b43_nphy_rssi_cal(dev, 2); 3151 b43_nphy_spur_workaround(dev);
476 b43_nphy_rssi_cal(dev, 0);
477 b43_nphy_rssi_cal(dev, 1);
478 //TODO get TX gain
479 //TODO init superswitch
480 //TODO calibrate LO
481 //TODO idle TSSI TX pctl
482 //TODO TX power control power setup
483 //TODO table writes
484 //TODO TX power control coefficients
485 //TODO enable TX power control
486 //TODO control antenna selection
487 //TODO init radar detection
488 //TODO reset channel if changed
489 3152
490 b43err(dev->wl, "IEEE 802.11n devices are not supported, yet.\n"); 3153 b43err(dev->wl, "IEEE 802.11n devices are not supported, yet.\n");
491 return 0; 3154 return 0;
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index 1749aef4147d..403aad3f894f 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -231,6 +231,7 @@
231#define B43_NPHY_C2_TXIQ_COMP_OFF B43_PHY_N(0x088) /* Core 2 TX I/Q comp offset */ 231#define B43_NPHY_C2_TXIQ_COMP_OFF B43_PHY_N(0x088) /* Core 2 TX I/Q comp offset */
232#define B43_NPHY_C1_TXCTL B43_PHY_N(0x08B) /* Core 1 TX control */ 232#define B43_NPHY_C1_TXCTL B43_PHY_N(0x08B) /* Core 1 TX control */
233#define B43_NPHY_C2_TXCTL B43_PHY_N(0x08C) /* Core 2 TX control */ 233#define B43_NPHY_C2_TXCTL B43_PHY_N(0x08C) /* Core 2 TX control */
234#define B43_NPHY_AFECTL_OVER1 B43_PHY_N(0x08F) /* AFE control override 1 */
234#define B43_NPHY_SCRAM_SIGCTL B43_PHY_N(0x090) /* Scram signal control */ 235#define B43_NPHY_SCRAM_SIGCTL B43_PHY_N(0x090) /* Scram signal control */
235#define B43_NPHY_SCRAM_SIGCTL_INITST 0x007F /* Initial state value */ 236#define B43_NPHY_SCRAM_SIGCTL_INITST 0x007F /* Initial state value */
236#define B43_NPHY_SCRAM_SIGCTL_INITST_SHIFT 0 237#define B43_NPHY_SCRAM_SIGCTL_INITST_SHIFT 0
@@ -705,6 +706,10 @@
705#define B43_NPHY_TXPCTL_INIT B43_PHY_N(0x222) /* TX power controll init */ 706#define B43_NPHY_TXPCTL_INIT B43_PHY_N(0x222) /* TX power controll init */
706#define B43_NPHY_TXPCTL_INIT_PIDXI1 0x00FF /* Power index init 1 */ 707#define B43_NPHY_TXPCTL_INIT_PIDXI1 0x00FF /* Power index init 1 */
707#define B43_NPHY_TXPCTL_INIT_PIDXI1_SHIFT 0 708#define B43_NPHY_TXPCTL_INIT_PIDXI1_SHIFT 0
709#define B43_NPHY_PAPD_EN0 B43_PHY_N(0x297) /* PAPD Enable0 TBD */
710#define B43_NPHY_EPS_TABLE_ADJ0 B43_PHY_N(0x298) /* EPS Table Adj0 TBD */
711#define B43_NPHY_PAPD_EN1 B43_PHY_N(0x29B) /* PAPD Enable1 TBD */
712#define B43_NPHY_EPS_TABLE_ADJ1 B43_PHY_N(0x29C) /* EPS Table Adj1 TBD */
708 713
709 714
710 715
@@ -919,8 +924,99 @@
919 924
920struct b43_wldev; 925struct b43_wldev;
921 926
927struct b43_phy_n_iq_comp {
928 s16 a0;
929 s16 b0;
930 s16 a1;
931 s16 b1;
932};
933
934struct b43_phy_n_rssical_cache {
935 u16 rssical_radio_regs_2G[2];
936 u16 rssical_phy_regs_2G[12];
937
938 u16 rssical_radio_regs_5G[2];
939 u16 rssical_phy_regs_5G[12];
940};
941
942struct b43_phy_n_cal_cache {
943 u16 txcal_radio_regs_2G[8];
944 u16 txcal_coeffs_2G[8];
945 struct b43_phy_n_iq_comp rxcal_coeffs_2G;
946
947 u16 txcal_radio_regs_5G[8];
948 u16 txcal_coeffs_5G[8];
949 struct b43_phy_n_iq_comp rxcal_coeffs_5G;
950};
951
952struct b43_phy_n_txpwrindex {
953 s8 index;
954 s8 index_internal;
955 s8 index_internal_save;
956 u16 AfectrlOverride;
957 u16 AfeCtrlDacGain;
958 u16 rad_gain;
959 u8 bbmult;
960 u16 iqcomp_a;
961 u16 iqcomp_b;
962 u16 locomp;
963};
964
922struct b43_phy_n { 965struct b43_phy_n {
923 //TODO lots of missing stuff 966 u8 antsel_type;
967 u8 cal_orig_pwr_idx[2];
968 u8 measure_hold;
969 u8 phyrxchain;
970 u8 perical;
971 u32 deaf_count;
972 u32 rxcalparams;
973 bool hang_avoid;
974 bool mute;
975 u16 papd_epsilon_offset[2];
976 s32 preamble_override;
977 u32 bb_mult_save;
978 u16 radio_chanspec;
979
980 bool gain_boost;
981 bool elna_gain_config;
982 bool band5g_pwrgain;
983
984 u8 mphase_cal_phase_id;
985 u16 mphase_txcal_cmdidx;
986 u16 mphase_txcal_numcmds;
987 u16 mphase_txcal_bestcoeffs[11];
988
989 u8 txpwrctrl;
990 u16 txcal_bbmult;
991 u16 txiqlocal_bestc[11];
992 bool txiqlocal_coeffsvalid;
993 struct b43_phy_n_txpwrindex txpwrindex[2];
994
995 u8 txrx_chain;
996 u16 tx_rx_cal_phy_saveregs[11];
997 u16 tx_rx_cal_radio_saveregs[22];
998
999 u16 rfctrl_intc1_save;
1000 u16 rfctrl_intc2_save;
1001
1002 u16 classifier_state;
1003 u16 clip_state[2];
1004
1005 bool aband_spurwar_en;
1006 bool gband_spurwar_en;
1007
1008 bool ipa2g_on;
1009 u8 iqcal_chanspec_2G;
1010 u8 rssical_chanspec_2G;
1011
1012 bool ipa5g_on;
1013 u8 iqcal_chanspec_5G;
1014 u8 rssical_chanspec_5G;
1015
1016 struct b43_phy_n_rssical_cache rssical_cache;
1017 struct b43_phy_n_cal_cache cal_cache;
1018 bool crsminpwr_adjusted;
1019 bool noisevars_adjusted;
924}; 1020};
925 1021
926 1022
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index c01b8e02412f..a6062c3e89a5 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -559,7 +559,6 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
559 b43err(dev->wl, "PIO transmission failure\n"); 559 b43err(dev->wl, "PIO transmission failure\n");
560 goto out; 560 goto out;
561 } 561 }
562 q->nr_tx_packets++;
563 562
564 B43_WARN_ON(q->buffer_used > q->buffer_size); 563 B43_WARN_ON(q->buffer_used > q->buffer_size);
565 if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) || 564 if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
@@ -605,22 +604,6 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
605 } 604 }
606} 605}
607 606
608void b43_pio_get_tx_stats(struct b43_wldev *dev,
609 struct ieee80211_tx_queue_stats *stats)
610{
611 const int nr_queues = dev->wl->hw->queues;
612 struct b43_pio_txqueue *q;
613 int i;
614
615 for (i = 0; i < nr_queues; i++) {
616 q = select_queue_by_priority(dev, i);
617
618 stats[i].len = B43_PIO_MAX_NR_TXPACKETS - q->free_packet_slots;
619 stats[i].limit = B43_PIO_MAX_NR_TXPACKETS;
620 stats[i].count = q->nr_tx_packets;
621 }
622}
623
624/* Returns whether we should fetch another frame. */ 607/* Returns whether we should fetch another frame. */
625static bool pio_rx_frame(struct b43_pio_rxqueue *q) 608static bool pio_rx_frame(struct b43_pio_rxqueue *q)
626{ 609{
diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h
index 7dd649c9ddad..1e516147424f 100644
--- a/drivers/net/wireless/b43/pio.h
+++ b/drivers/net/wireless/b43/pio.h
@@ -55,8 +55,6 @@
55#define B43_PIO_MAX_NR_TXPACKETS 32 55#define B43_PIO_MAX_NR_TXPACKETS 32
56 56
57 57
58#ifdef CONFIG_B43_PIO
59
60struct b43_pio_txpacket { 58struct b43_pio_txpacket {
61 /* Pointer to the TX queue we belong to. */ 59 /* Pointer to the TX queue we belong to. */
62 struct b43_pio_txqueue *queue; 60 struct b43_pio_txqueue *queue;
@@ -92,9 +90,6 @@ struct b43_pio_txqueue {
92 struct b43_pio_txpacket packets[B43_PIO_MAX_NR_TXPACKETS]; 90 struct b43_pio_txpacket packets[B43_PIO_MAX_NR_TXPACKETS];
93 struct list_head packets_list; 91 struct list_head packets_list;
94 92
95 /* Total number of transmitted packets. */
96 unsigned int nr_tx_packets;
97
98 /* Shortcut to the 802.11 core revision. This is to 93 /* Shortcut to the 802.11 core revision. This is to
99 * avoid horrible pointer dereferencing in the fastpaths. */ 94 * avoid horrible pointer dereferencing in the fastpaths. */
100 u8 rev; 95 u8 rev;
@@ -162,49 +157,9 @@ void b43_pio_free(struct b43_wldev *dev);
162int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb); 157int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb);
163void b43_pio_handle_txstatus(struct b43_wldev *dev, 158void b43_pio_handle_txstatus(struct b43_wldev *dev,
164 const struct b43_txstatus *status); 159 const struct b43_txstatus *status);
165void b43_pio_get_tx_stats(struct b43_wldev *dev,
166 struct ieee80211_tx_queue_stats *stats);
167void b43_pio_rx(struct b43_pio_rxqueue *q); 160void b43_pio_rx(struct b43_pio_rxqueue *q);
168 161
169void b43_pio_tx_suspend(struct b43_wldev *dev); 162void b43_pio_tx_suspend(struct b43_wldev *dev);
170void b43_pio_tx_resume(struct b43_wldev *dev); 163void b43_pio_tx_resume(struct b43_wldev *dev);
171 164
172
173#else /* CONFIG_B43_PIO */
174
175
176static inline int b43_pio_init(struct b43_wldev *dev)
177{
178 return 0;
179}
180static inline void b43_pio_free(struct b43_wldev *dev)
181{
182}
183static inline void b43_pio_stop(struct b43_wldev *dev)
184{
185}
186static inline int b43_pio_tx(struct b43_wldev *dev,
187 struct sk_buff *skb)
188{
189 return 0;
190}
191static inline void b43_pio_handle_txstatus(struct b43_wldev *dev,
192 const struct b43_txstatus *status)
193{
194}
195static inline void b43_pio_get_tx_stats(struct b43_wldev *dev,
196 struct ieee80211_tx_queue_stats *stats)
197{
198}
199static inline void b43_pio_rx(struct b43_pio_rxqueue *q)
200{
201}
202static inline void b43_pio_tx_suspend(struct b43_wldev *dev)
203{
204}
205static inline void b43_pio_tx_resume(struct b43_wldev *dev)
206{
207}
208
209#endif /* CONFIG_B43_PIO */
210#endif /* B43_PIO_H_ */ 165#endif /* B43_PIO_H_ */
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 4e2336315545..a00d509150f7 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -1336,7 +1336,7 @@ b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel)
1336} 1336}
1337 1337
1338 1338
1339const u8 b43_ntab_adjustpower0[] = { 1339static const u8 b43_ntab_adjustpower0[] = {
1340 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 1340 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,
1341 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 1341 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03,
1342 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05, 1342 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05,
@@ -1355,7 +1355,7 @@ const u8 b43_ntab_adjustpower0[] = {
1355 0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F, 1355 0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F,
1356}; 1356};
1357 1357
1358const u8 b43_ntab_adjustpower1[] = { 1358static const u8 b43_ntab_adjustpower1[] = {
1359 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 1359 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,
1360 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 1360 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03,
1361 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05, 1361 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05,
@@ -1374,11 +1374,11 @@ const u8 b43_ntab_adjustpower1[] = {
1374 0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F, 1374 0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F,
1375}; 1375};
1376 1376
1377const u16 b43_ntab_bdi[] = { 1377static const u16 b43_ntab_bdi[] = {
1378 0x0070, 0x0126, 0x012C, 0x0246, 0x048D, 0x04D2, 1378 0x0070, 0x0126, 0x012C, 0x0246, 0x048D, 0x04D2,
1379}; 1379};
1380 1380
1381const u32 b43_ntab_channelest[] = { 1381static const u32 b43_ntab_channelest[] = {
1382 0x44444444, 0x44444444, 0x44444444, 0x44444444, 1382 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1383 0x44444444, 0x44444444, 0x44444444, 0x44444444, 1383 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1384 0x10101010, 0x10101010, 0x10101010, 0x10101010, 1384 0x10101010, 0x10101010, 0x10101010, 0x10101010,
@@ -1405,7 +1405,7 @@ const u32 b43_ntab_channelest[] = {
1405 0x10101010, 0x10101010, 0x10101010, 0x10101010, 1405 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1406}; 1406};
1407 1407
1408const u8 b43_ntab_estimatepowerlt0[] = { 1408static const u8 b43_ntab_estimatepowerlt0[] = {
1409 0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49, 1409 0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49,
1410 0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41, 1410 0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41,
1411 0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 1411 0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39,
@@ -1416,7 +1416,7 @@ const u8 b43_ntab_estimatepowerlt0[] = {
1416 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 1416 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11,
1417}; 1417};
1418 1418
1419const u8 b43_ntab_estimatepowerlt1[] = { 1419static const u8 b43_ntab_estimatepowerlt1[] = {
1420 0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49, 1420 0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49,
1421 0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41, 1421 0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41,
1422 0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 1422 0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39,
@@ -1427,14 +1427,14 @@ const u8 b43_ntab_estimatepowerlt1[] = {
1427 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 1427 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11,
1428}; 1428};
1429 1429
1430const u8 b43_ntab_framelookup[] = { 1430static const u8 b43_ntab_framelookup[] = {
1431 0x02, 0x04, 0x14, 0x14, 0x03, 0x05, 0x16, 0x16, 1431 0x02, 0x04, 0x14, 0x14, 0x03, 0x05, 0x16, 0x16,
1432 0x0A, 0x0C, 0x1C, 0x1C, 0x0B, 0x0D, 0x1E, 0x1E, 1432 0x0A, 0x0C, 0x1C, 0x1C, 0x0B, 0x0D, 0x1E, 0x1E,
1433 0x06, 0x08, 0x18, 0x18, 0x07, 0x09, 0x1A, 0x1A, 1433 0x06, 0x08, 0x18, 0x18, 0x07, 0x09, 0x1A, 0x1A,
1434 0x0E, 0x10, 0x20, 0x28, 0x0F, 0x11, 0x22, 0x2A, 1434 0x0E, 0x10, 0x20, 0x28, 0x0F, 0x11, 0x22, 0x2A,
1435}; 1435};
1436 1436
1437const u32 b43_ntab_framestruct[] = { 1437static const u32 b43_ntab_framestruct[] = {
1438 0x08004A04, 0x00100000, 0x01000A05, 0x00100020, 1438 0x08004A04, 0x00100000, 0x01000A05, 0x00100020,
1439 0x09804506, 0x00100030, 0x09804507, 0x00100030, 1439 0x09804506, 0x00100030, 0x09804507, 0x00100030,
1440 0x00000000, 0x00000000, 0x00000000, 0x00000000, 1440 0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -1645,7 +1645,7 @@ const u32 b43_ntab_framestruct[] = {
1645 0x00000000, 0x00000000, 0x00000000, 0x00000000, 1645 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1646}; 1646};
1647 1647
1648const u32 b43_ntab_gainctl0[] = { 1648static const u32 b43_ntab_gainctl0[] = {
1649 0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E, 1649 0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E,
1650 0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C, 1650 0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C,
1651 0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A, 1651 0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A,
@@ -1680,7 +1680,7 @@ const u32 b43_ntab_gainctl0[] = {
1680 0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00, 1680 0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00,
1681}; 1681};
1682 1682
1683const u32 b43_ntab_gainctl1[] = { 1683static const u32 b43_ntab_gainctl1[] = {
1684 0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E, 1684 0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E,
1685 0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C, 1685 0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C,
1686 0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A, 1686 0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A,
@@ -1715,12 +1715,12 @@ const u32 b43_ntab_gainctl1[] = {
1715 0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00, 1715 0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00,
1716}; 1716};
1717 1717
1718const u32 b43_ntab_intlevel[] = { 1718static const u32 b43_ntab_intlevel[] = {
1719 0x00802070, 0x0671188D, 0x0A60192C, 0x0A300E46, 1719 0x00802070, 0x0671188D, 0x0A60192C, 0x0A300E46,
1720 0x00C1188D, 0x080024D2, 0x00000070, 1720 0x00C1188D, 0x080024D2, 0x00000070,
1721}; 1721};
1722 1722
1723const u32 b43_ntab_iqlt0[] = { 1723static const u32 b43_ntab_iqlt0[] = {
1724 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1724 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
1725 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1725 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
1726 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1726 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
@@ -1755,7 +1755,7 @@ const u32 b43_ntab_iqlt0[] = {
1755 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1755 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
1756}; 1756};
1757 1757
1758const u32 b43_ntab_iqlt1[] = { 1758static const u32 b43_ntab_iqlt1[] = {
1759 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1759 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
1760 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1760 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
1761 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1761 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
@@ -1790,7 +1790,7 @@ const u32 b43_ntab_iqlt1[] = {
1790 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1790 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
1791}; 1791};
1792 1792
1793const u16 b43_ntab_loftlt0[] = { 1793static const u16 b43_ntab_loftlt0[] = {
1794 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 1794 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
1795 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 1795 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103,
1796 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 1796 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
@@ -1815,7 +1815,7 @@ const u16 b43_ntab_loftlt0[] = {
1815 0x0002, 0x0103, 1815 0x0002, 0x0103,
1816}; 1816};
1817 1817
1818const u16 b43_ntab_loftlt1[] = { 1818static const u16 b43_ntab_loftlt1[] = {
1819 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 1819 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
1820 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 1820 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103,
1821 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 1821 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
@@ -1840,7 +1840,7 @@ const u16 b43_ntab_loftlt1[] = {
1840 0x0002, 0x0103, 1840 0x0002, 0x0103,
1841}; 1841};
1842 1842
1843const u8 b43_ntab_mcs[] = { 1843static const u8 b43_ntab_mcs[] = {
1844 0x00, 0x08, 0x0A, 0x10, 0x12, 0x19, 0x1A, 0x1C, 1844 0x00, 0x08, 0x0A, 0x10, 0x12, 0x19, 0x1A, 0x1C,
1845 0x40, 0x48, 0x4A, 0x50, 0x52, 0x59, 0x5A, 0x5C, 1845 0x40, 0x48, 0x4A, 0x50, 0x52, 0x59, 0x5A, 0x5C,
1846 0x80, 0x88, 0x8A, 0x90, 0x92, 0x99, 0x9A, 0x9C, 1846 0x80, 0x88, 0x8A, 0x90, 0x92, 0x99, 0x9A, 0x9C,
@@ -1859,7 +1859,7 @@ const u8 b43_ntab_mcs[] = {
1859 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1859 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1860}; 1860};
1861 1861
1862const u32 b43_ntab_noisevar10[] = { 1862static const u32 b43_ntab_noisevar10[] = {
1863 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1863 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
1864 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1864 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
1865 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1865 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
@@ -1926,7 +1926,7 @@ const u32 b43_ntab_noisevar10[] = {
1926 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1926 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
1927}; 1927};
1928 1928
1929const u32 b43_ntab_noisevar11[] = { 1929static const u32 b43_ntab_noisevar11[] = {
1930 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1930 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
1931 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1931 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
1932 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1932 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
@@ -1993,7 +1993,7 @@ const u32 b43_ntab_noisevar11[] = {
1993 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1993 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
1994}; 1994};
1995 1995
1996const u16 b43_ntab_pilot[] = { 1996static const u16 b43_ntab_pilot[] = {
1997 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08, 1997 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08,
1998 0xFF08, 0xFF08, 0x80D5, 0x80D5, 0x80D5, 0x80D5, 1998 0xFF08, 0xFF08, 0x80D5, 0x80D5, 0x80D5, 0x80D5,
1999 0x80D5, 0x80D5, 0x80D5, 0x80D5, 0xFF0A, 0xFF82, 1999 0x80D5, 0x80D5, 0x80D5, 0x80D5, 0xFF0A, 0xFF82,
@@ -2011,12 +2011,12 @@ const u16 b43_ntab_pilot[] = {
2011 0xF0A0, 0xF028, 0xFFFF, 0xFFFF, 2011 0xF0A0, 0xF028, 0xFFFF, 0xFFFF,
2012}; 2012};
2013 2013
2014const u32 b43_ntab_pilotlt[] = { 2014static const u32 b43_ntab_pilotlt[] = {
2015 0x76540123, 0x62407351, 0x76543201, 0x76540213, 2015 0x76540123, 0x62407351, 0x76543201, 0x76540213,
2016 0x76540123, 0x76430521, 2016 0x76540123, 0x76430521,
2017}; 2017};
2018 2018
2019const u32 b43_ntab_tdi20a0[] = { 2019static const u32 b43_ntab_tdi20a0[] = {
2020 0x00091226, 0x000A1429, 0x000B56AD, 0x000C58B0, 2020 0x00091226, 0x000A1429, 0x000B56AD, 0x000C58B0,
2021 0x000D5AB3, 0x000E9CB6, 0x000F9EBA, 0x0000C13D, 2021 0x000D5AB3, 0x000E9CB6, 0x000F9EBA, 0x0000C13D,
2022 0x00020301, 0x00030504, 0x00040708, 0x0005090B, 2022 0x00020301, 0x00030504, 0x00040708, 0x0005090B,
@@ -2033,7 +2033,7 @@ const u32 b43_ntab_tdi20a0[] = {
2033 0x00000000, 0x00000000, 0x00000000, 2033 0x00000000, 0x00000000, 0x00000000,
2034}; 2034};
2035 2035
2036const u32 b43_ntab_tdi20a1[] = { 2036static const u32 b43_ntab_tdi20a1[] = {
2037 0x00014B26, 0x00028D29, 0x000393AD, 0x00049630, 2037 0x00014B26, 0x00028D29, 0x000393AD, 0x00049630,
2038 0x0005D833, 0x0006DA36, 0x00099C3A, 0x000A9E3D, 2038 0x0005D833, 0x0006DA36, 0x00099C3A, 0x000A9E3D,
2039 0x000BC081, 0x000CC284, 0x000DC488, 0x000F068B, 2039 0x000BC081, 0x000CC284, 0x000DC488, 0x000F068B,
@@ -2050,7 +2050,7 @@ const u32 b43_ntab_tdi20a1[] = {
2050 0x00000000, 0x00000000, 0x00000000, 2050 0x00000000, 0x00000000, 0x00000000,
2051}; 2051};
2052 2052
2053const u32 b43_ntab_tdi40a0[] = { 2053static const u32 b43_ntab_tdi40a0[] = {
2054 0x0011A346, 0x00136CCF, 0x0014F5D9, 0x001641E2, 2054 0x0011A346, 0x00136CCF, 0x0014F5D9, 0x001641E2,
2055 0x0017CB6B, 0x00195475, 0x001B2383, 0x001CAD0C, 2055 0x0017CB6B, 0x00195475, 0x001B2383, 0x001CAD0C,
2056 0x001E7616, 0x0000821F, 0x00020BA8, 0x0003D4B2, 2056 0x001E7616, 0x0000821F, 0x00020BA8, 0x0003D4B2,
@@ -2081,7 +2081,7 @@ const u32 b43_ntab_tdi40a0[] = {
2081 0x00000000, 0x00000000, 2081 0x00000000, 0x00000000,
2082}; 2082};
2083 2083
2084const u32 b43_ntab_tdi40a1[] = { 2084static const u32 b43_ntab_tdi40a1[] = {
2085 0x001EDB36, 0x000129CA, 0x0002B353, 0x00047CDD, 2085 0x001EDB36, 0x000129CA, 0x0002B353, 0x00047CDD,
2086 0x0005C8E6, 0x000791EF, 0x00091BF9, 0x000AAA07, 2086 0x0005C8E6, 0x000791EF, 0x00091BF9, 0x000AAA07,
2087 0x000C3391, 0x000DFD1A, 0x00120923, 0x0013D22D, 2087 0x000C3391, 0x000DFD1A, 0x00120923, 0x0013D22D,
@@ -2112,7 +2112,7 @@ const u32 b43_ntab_tdi40a1[] = {
2112 0x00000000, 0x00000000, 2112 0x00000000, 0x00000000,
2113}; 2113};
2114 2114
2115const u32 b43_ntab_tdtrn[] = { 2115static const u32 b43_ntab_tdtrn[] = {
2116 0x061C061C, 0x0050EE68, 0xF592FE36, 0xFE5212F6, 2116 0x061C061C, 0x0050EE68, 0xF592FE36, 0xFE5212F6,
2117 0x00000C38, 0xFE5212F6, 0xF592FE36, 0x0050EE68, 2117 0x00000C38, 0xFE5212F6, 0xF592FE36, 0x0050EE68,
2118 0x061C061C, 0xEE680050, 0xFE36F592, 0x12F6FE52, 2118 0x061C061C, 0xEE680050, 0xFE36F592, 0x12F6FE52,
@@ -2291,7 +2291,7 @@ const u32 b43_ntab_tdtrn[] = {
2291 0xFA58FC00, 0x0B64FC7E, 0x0800F7B6, 0x00F006BE, 2291 0xFA58FC00, 0x0B64FC7E, 0x0800F7B6, 0x00F006BE,
2292}; 2292};
2293 2293
2294const u32 b43_ntab_tmap[] = { 2294static const u32 b43_ntab_tmap[] = {
2295 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888, 2295 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888,
2296 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, 2296 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8,
2297 0xF1111110, 0x11111111, 0x11F11111, 0x00000111, 2297 0xF1111110, 0x11111111, 0x11F11111, 0x00000111,
@@ -2406,6 +2406,544 @@ const u32 b43_ntab_tmap[] = {
2406 0x00000000, 0x00000000, 0x00000000, 0x00000000, 2406 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2407}; 2407};
2408 2408
2409const u32 b43_ntab_tx_gain_rev0_1_2[] = {
2410 0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
2411 0x03cc2944, 0x03c82b44, 0x03c82b42, 0x03c82a44,
2412 0x03c82a42, 0x03c82944, 0x03c82942, 0x03c82844,
2413 0x03c82842, 0x03c42b44, 0x03c42b42, 0x03c42a44,
2414 0x03c42a42, 0x03c42944, 0x03c42942, 0x03c42844,
2415 0x03c42842, 0x03c42744, 0x03c42742, 0x03c42644,
2416 0x03c42642, 0x03c42544, 0x03c42542, 0x03c42444,
2417 0x03c42442, 0x03c02b44, 0x03c02b42, 0x03c02a44,
2418 0x03c02a42, 0x03c02944, 0x03c02942, 0x03c02844,
2419 0x03c02842, 0x03c02744, 0x03c02742, 0x03b02b44,
2420 0x03b02b42, 0x03b02a44, 0x03b02a42, 0x03b02944,
2421 0x03b02942, 0x03b02844, 0x03b02842, 0x03b02744,
2422 0x03b02742, 0x03b02644, 0x03b02642, 0x03b02544,
2423 0x03b02542, 0x03a02b44, 0x03a02b42, 0x03a02a44,
2424 0x03a02a42, 0x03a02944, 0x03a02942, 0x03a02844,
2425 0x03a02842, 0x03a02744, 0x03a02742, 0x03902b44,
2426 0x03902b42, 0x03902a44, 0x03902a42, 0x03902944,
2427 0x03902942, 0x03902844, 0x03902842, 0x03902744,
2428 0x03902742, 0x03902644, 0x03902642, 0x03902544,
2429 0x03902542, 0x03802b44, 0x03802b42, 0x03802a44,
2430 0x03802a42, 0x03802944, 0x03802942, 0x03802844,
2431 0x03802842, 0x03802744, 0x03802742, 0x03802644,
2432 0x03802642, 0x03802544, 0x03802542, 0x03802444,
2433 0x03802442, 0x03802344, 0x03802342, 0x03802244,
2434 0x03802242, 0x03802144, 0x03802142, 0x03802044,
2435 0x03802042, 0x03801f44, 0x03801f42, 0x03801e44,
2436 0x03801e42, 0x03801d44, 0x03801d42, 0x03801c44,
2437 0x03801c42, 0x03801b44, 0x03801b42, 0x03801a44,
2438 0x03801a42, 0x03801944, 0x03801942, 0x03801844,
2439 0x03801842, 0x03801744, 0x03801742, 0x03801644,
2440 0x03801642, 0x03801544, 0x03801542, 0x03801444,
2441 0x03801442, 0x03801344, 0x03801342, 0x00002b00,
2442};
2443
2444const u32 b43_ntab_tx_gain_rev3plus_2ghz[] = {
2445 0x1f410044, 0x1f410042, 0x1f410040, 0x1f41003e,
2446 0x1f41003c, 0x1f41003b, 0x1f410039, 0x1f410037,
2447 0x1e410044, 0x1e410042, 0x1e410040, 0x1e41003e,
2448 0x1e41003c, 0x1e41003b, 0x1e410039, 0x1e410037,
2449 0x1d410044, 0x1d410042, 0x1d410040, 0x1d41003e,
2450 0x1d41003c, 0x1d41003b, 0x1d410039, 0x1d410037,
2451 0x1c410044, 0x1c410042, 0x1c410040, 0x1c41003e,
2452 0x1c41003c, 0x1c41003b, 0x1c410039, 0x1c410037,
2453 0x1b410044, 0x1b410042, 0x1b410040, 0x1b41003e,
2454 0x1b41003c, 0x1b41003b, 0x1b410039, 0x1b410037,
2455 0x1a410044, 0x1a410042, 0x1a410040, 0x1a41003e,
2456 0x1a41003c, 0x1a41003b, 0x1a410039, 0x1a410037,
2457 0x19410044, 0x19410042, 0x19410040, 0x1941003e,
2458 0x1941003c, 0x1941003b, 0x19410039, 0x19410037,
2459 0x18410044, 0x18410042, 0x18410040, 0x1841003e,
2460 0x1841003c, 0x1841003b, 0x18410039, 0x18410037,
2461 0x17410044, 0x17410042, 0x17410040, 0x1741003e,
2462 0x1741003c, 0x1741003b, 0x17410039, 0x17410037,
2463 0x16410044, 0x16410042, 0x16410040, 0x1641003e,
2464 0x1641003c, 0x1641003b, 0x16410039, 0x16410037,
2465 0x15410044, 0x15410042, 0x15410040, 0x1541003e,
2466 0x1541003c, 0x1541003b, 0x15410039, 0x15410037,
2467 0x14410044, 0x14410042, 0x14410040, 0x1441003e,
2468 0x1441003c, 0x1441003b, 0x14410039, 0x14410037,
2469 0x13410044, 0x13410042, 0x13410040, 0x1341003e,
2470 0x1341003c, 0x1341003b, 0x13410039, 0x13410037,
2471 0x12410044, 0x12410042, 0x12410040, 0x1241003e,
2472 0x1241003c, 0x1241003b, 0x12410039, 0x12410037,
2473 0x11410044, 0x11410042, 0x11410040, 0x1141003e,
2474 0x1141003c, 0x1141003b, 0x11410039, 0x11410037,
2475 0x10410044, 0x10410042, 0x10410040, 0x1041003e,
2476 0x1041003c, 0x1041003b, 0x10410039, 0x10410037,
2477};
2478
2479const u32 b43_ntab_tx_gain_rev3_5ghz[] = {
2480 0xcff70044, 0xcff70042, 0xcff70040, 0xcff7003e,
2481 0xcff7003c, 0xcff7003b, 0xcff70039, 0xcff70037,
2482 0xcef70044, 0xcef70042, 0xcef70040, 0xcef7003e,
2483 0xcef7003c, 0xcef7003b, 0xcef70039, 0xcef70037,
2484 0xcdf70044, 0xcdf70042, 0xcdf70040, 0xcdf7003e,
2485 0xcdf7003c, 0xcdf7003b, 0xcdf70039, 0xcdf70037,
2486 0xccf70044, 0xccf70042, 0xccf70040, 0xccf7003e,
2487 0xccf7003c, 0xccf7003b, 0xccf70039, 0xccf70037,
2488 0xcbf70044, 0xcbf70042, 0xcbf70040, 0xcbf7003e,
2489 0xcbf7003c, 0xcbf7003b, 0xcbf70039, 0xcbf70037,
2490 0xcaf70044, 0xcaf70042, 0xcaf70040, 0xcaf7003e,
2491 0xcaf7003c, 0xcaf7003b, 0xcaf70039, 0xcaf70037,
2492 0xc9f70044, 0xc9f70042, 0xc9f70040, 0xc9f7003e,
2493 0xc9f7003c, 0xc9f7003b, 0xc9f70039, 0xc9f70037,
2494 0xc8f70044, 0xc8f70042, 0xc8f70040, 0xc8f7003e,
2495 0xc8f7003c, 0xc8f7003b, 0xc8f70039, 0xc8f70037,
2496 0xc7f70044, 0xc7f70042, 0xc7f70040, 0xc7f7003e,
2497 0xc7f7003c, 0xc7f7003b, 0xc7f70039, 0xc7f70037,
2498 0xc6f70044, 0xc6f70042, 0xc6f70040, 0xc6f7003e,
2499 0xc6f7003c, 0xc6f7003b, 0xc6f70039, 0xc6f70037,
2500 0xc5f70044, 0xc5f70042, 0xc5f70040, 0xc5f7003e,
2501 0xc5f7003c, 0xc5f7003b, 0xc5f70039, 0xc5f70037,
2502 0xc4f70044, 0xc4f70042, 0xc4f70040, 0xc4f7003e,
2503 0xc4f7003c, 0xc4f7003b, 0xc4f70039, 0xc4f70037,
2504 0xc3f70044, 0xc3f70042, 0xc3f70040, 0xc3f7003e,
2505 0xc3f7003c, 0xc3f7003b, 0xc3f70039, 0xc3f70037,
2506 0xc2f70044, 0xc2f70042, 0xc2f70040, 0xc2f7003e,
2507 0xc2f7003c, 0xc2f7003b, 0xc2f70039, 0xc2f70037,
2508 0xc1f70044, 0xc1f70042, 0xc1f70040, 0xc1f7003e,
2509 0xc1f7003c, 0xc1f7003b, 0xc1f70039, 0xc1f70037,
2510 0xc0f70044, 0xc0f70042, 0xc0f70040, 0xc0f7003e,
2511 0xc0f7003c, 0xc0f7003b, 0xc0f70039, 0xc0f70037,
2512};
2513
2514const u32 b43_ntab_tx_gain_rev4_5ghz[] = {
2515 0x2ff20044, 0x2ff20042, 0x2ff20040, 0x2ff2003e,
2516 0x2ff2003c, 0x2ff2003b, 0x2ff20039, 0x2ff20037,
2517 0x2ef20044, 0x2ef20042, 0x2ef20040, 0x2ef2003e,
2518 0x2ef2003c, 0x2ef2003b, 0x2ef20039, 0x2ef20037,
2519 0x2df20044, 0x2df20042, 0x2df20040, 0x2df2003e,
2520 0x2df2003c, 0x2df2003b, 0x2df20039, 0x2df20037,
2521 0x2cf20044, 0x2cf20042, 0x2cf20040, 0x2cf2003e,
2522 0x2cf2003c, 0x2cf2003b, 0x2cf20039, 0x2cf20037,
2523 0x2bf20044, 0x2bf20042, 0x2bf20040, 0x2bf2003e,
2524 0x2bf2003c, 0x2bf2003b, 0x2bf20039, 0x2bf20037,
2525 0x2af20044, 0x2af20042, 0x2af20040, 0x2af2003e,
2526 0x2af2003c, 0x2af2003b, 0x2af20039, 0x2af20037,
2527 0x29f20044, 0x29f20042, 0x29f20040, 0x29f2003e,
2528 0x29f2003c, 0x29f2003b, 0x29f20039, 0x29f20037,
2529 0x28f20044, 0x28f20042, 0x28f20040, 0x28f2003e,
2530 0x28f2003c, 0x28f2003b, 0x28f20039, 0x28f20037,
2531 0x27f20044, 0x27f20042, 0x27f20040, 0x27f2003e,
2532 0x27f2003c, 0x27f2003b, 0x27f20039, 0x27f20037,
2533 0x26f20044, 0x26f20042, 0x26f20040, 0x26f2003e,
2534 0x26f2003c, 0x26f2003b, 0x26f20039, 0x26f20037,
2535 0x25f20044, 0x25f20042, 0x25f20040, 0x25f2003e,
2536 0x25f2003c, 0x25f2003b, 0x25f20039, 0x25f20037,
2537 0x24f20044, 0x24f20042, 0x24f20040, 0x24f2003e,
2538 0x24f2003c, 0x24f2003b, 0x24f20039, 0x24f20038,
2539 0x23f20041, 0x23f20040, 0x23f2003f, 0x23f2003e,
2540 0x23f2003c, 0x23f2003b, 0x23f20039, 0x23f20037,
2541 0x22f20044, 0x22f20042, 0x22f20040, 0x22f2003e,
2542 0x22f2003c, 0x22f2003b, 0x22f20039, 0x22f20037,
2543 0x21f20044, 0x21f20042, 0x21f20040, 0x21f2003e,
2544 0x21f2003c, 0x21f2003b, 0x21f20039, 0x21f20037,
2545 0x20d20043, 0x20d20041, 0x20d2003e, 0x20d2003c,
2546 0x20d2003a, 0x20d20038, 0x20d20036, 0x20d20034,
2547};
2548
2549const u32 b43_ntab_tx_gain_rev5plus_5ghz[] = {
2550 0x0f62004a, 0x0f620048, 0x0f620046, 0x0f620044,
2551 0x0f620042, 0x0f620040, 0x0f62003e, 0x0f62003c,
2552 0x0e620044, 0x0e620042, 0x0e620040, 0x0e62003e,
2553 0x0e62003c, 0x0e62003d, 0x0e62003b, 0x0e62003a,
2554 0x0d620043, 0x0d620041, 0x0d620040, 0x0d62003e,
2555 0x0d62003d, 0x0d62003c, 0x0d62003b, 0x0d62003a,
2556 0x0c620041, 0x0c620040, 0x0c62003f, 0x0c62003e,
2557 0x0c62003c, 0x0c62003b, 0x0c620039, 0x0c620037,
2558 0x0b620046, 0x0b620044, 0x0b620042, 0x0b620040,
2559 0x0b62003e, 0x0b62003c, 0x0b62003b, 0x0b62003a,
2560 0x0a620041, 0x0a620040, 0x0a62003e, 0x0a62003c,
2561 0x0a62003b, 0x0a62003a, 0x0a620039, 0x0a620038,
2562 0x0962003e, 0x0962003d, 0x0962003c, 0x0962003b,
2563 0x09620039, 0x09620037, 0x09620035, 0x09620033,
2564 0x08620044, 0x08620042, 0x08620040, 0x0862003e,
2565 0x0862003c, 0x0862003b, 0x0862003a, 0x08620039,
2566 0x07620043, 0x07620042, 0x07620040, 0x0762003f,
2567 0x0762003d, 0x0762003b, 0x0762003a, 0x07620039,
2568 0x0662003e, 0x0662003d, 0x0662003c, 0x0662003b,
2569 0x06620039, 0x06620037, 0x06620035, 0x06620033,
2570 0x05620046, 0x05620044, 0x05620042, 0x05620040,
2571 0x0562003e, 0x0562003c, 0x0562003b, 0x05620039,
2572 0x04620044, 0x04620042, 0x04620040, 0x0462003e,
2573 0x0462003c, 0x0462003b, 0x04620039, 0x04620038,
2574 0x0362003c, 0x0362003b, 0x0362003a, 0x03620039,
2575 0x03620038, 0x03620037, 0x03620035, 0x03620033,
2576 0x0262004c, 0x0262004a, 0x02620048, 0x02620047,
2577 0x02620046, 0x02620044, 0x02620043, 0x02620042,
2578 0x0162004a, 0x01620048, 0x01620046, 0x01620044,
2579 0x01620043, 0x01620042, 0x01620041, 0x01620040,
2580 0x00620042, 0x00620040, 0x0062003e, 0x0062003c,
2581 0x0062003b, 0x00620039, 0x00620037, 0x00620035,
2582};
2583
2584const u32 txpwrctrl_tx_gain_ipa[] = {
2585 0x5ff7002d, 0x5ff7002b, 0x5ff7002a, 0x5ff70029,
2586 0x5ff70028, 0x5ff70027, 0x5ff70026, 0x5ff70025,
2587 0x5ef7002d, 0x5ef7002b, 0x5ef7002a, 0x5ef70029,
2588 0x5ef70028, 0x5ef70027, 0x5ef70026, 0x5ef70025,
2589 0x5df7002d, 0x5df7002b, 0x5df7002a, 0x5df70029,
2590 0x5df70028, 0x5df70027, 0x5df70026, 0x5df70025,
2591 0x5cf7002d, 0x5cf7002b, 0x5cf7002a, 0x5cf70029,
2592 0x5cf70028, 0x5cf70027, 0x5cf70026, 0x5cf70025,
2593 0x5bf7002d, 0x5bf7002b, 0x5bf7002a, 0x5bf70029,
2594 0x5bf70028, 0x5bf70027, 0x5bf70026, 0x5bf70025,
2595 0x5af7002d, 0x5af7002b, 0x5af7002a, 0x5af70029,
2596 0x5af70028, 0x5af70027, 0x5af70026, 0x5af70025,
2597 0x59f7002d, 0x59f7002b, 0x59f7002a, 0x59f70029,
2598 0x59f70028, 0x59f70027, 0x59f70026, 0x59f70025,
2599 0x58f7002d, 0x58f7002b, 0x58f7002a, 0x58f70029,
2600 0x58f70028, 0x58f70027, 0x58f70026, 0x58f70025,
2601 0x57f7002d, 0x57f7002b, 0x57f7002a, 0x57f70029,
2602 0x57f70028, 0x57f70027, 0x57f70026, 0x57f70025,
2603 0x56f7002d, 0x56f7002b, 0x56f7002a, 0x56f70029,
2604 0x56f70028, 0x56f70027, 0x56f70026, 0x56f70025,
2605 0x55f7002d, 0x55f7002b, 0x55f7002a, 0x55f70029,
2606 0x55f70028, 0x55f70027, 0x55f70026, 0x55f70025,
2607 0x54f7002d, 0x54f7002b, 0x54f7002a, 0x54f70029,
2608 0x54f70028, 0x54f70027, 0x54f70026, 0x54f70025,
2609 0x53f7002d, 0x53f7002b, 0x53f7002a, 0x53f70029,
2610 0x53f70028, 0x53f70027, 0x53f70026, 0x53f70025,
2611 0x52f7002d, 0x52f7002b, 0x52f7002a, 0x52f70029,
2612 0x52f70028, 0x52f70027, 0x52f70026, 0x52f70025,
2613 0x51f7002d, 0x51f7002b, 0x51f7002a, 0x51f70029,
2614 0x51f70028, 0x51f70027, 0x51f70026, 0x51f70025,
2615 0x50f7002d, 0x50f7002b, 0x50f7002a, 0x50f70029,
2616 0x50f70028, 0x50f70027, 0x50f70026, 0x50f70025,
2617};
2618
2619const u32 txpwrctrl_tx_gain_ipa_rev5[] = {
2620 0x1ff7002d, 0x1ff7002b, 0x1ff7002a, 0x1ff70029,
2621 0x1ff70028, 0x1ff70027, 0x1ff70026, 0x1ff70025,
2622 0x1ef7002d, 0x1ef7002b, 0x1ef7002a, 0x1ef70029,
2623 0x1ef70028, 0x1ef70027, 0x1ef70026, 0x1ef70025,
2624 0x1df7002d, 0x1df7002b, 0x1df7002a, 0x1df70029,
2625 0x1df70028, 0x1df70027, 0x1df70026, 0x1df70025,
2626 0x1cf7002d, 0x1cf7002b, 0x1cf7002a, 0x1cf70029,
2627 0x1cf70028, 0x1cf70027, 0x1cf70026, 0x1cf70025,
2628 0x1bf7002d, 0x1bf7002b, 0x1bf7002a, 0x1bf70029,
2629 0x1bf70028, 0x1bf70027, 0x1bf70026, 0x1bf70025,
2630 0x1af7002d, 0x1af7002b, 0x1af7002a, 0x1af70029,
2631 0x1af70028, 0x1af70027, 0x1af70026, 0x1af70025,
2632 0x19f7002d, 0x19f7002b, 0x19f7002a, 0x19f70029,
2633 0x19f70028, 0x19f70027, 0x19f70026, 0x19f70025,
2634 0x18f7002d, 0x18f7002b, 0x18f7002a, 0x18f70029,
2635 0x18f70028, 0x18f70027, 0x18f70026, 0x18f70025,
2636 0x17f7002d, 0x17f7002b, 0x17f7002a, 0x17f70029,
2637 0x17f70028, 0x17f70027, 0x17f70026, 0x17f70025,
2638 0x16f7002d, 0x16f7002b, 0x16f7002a, 0x16f70029,
2639 0x16f70028, 0x16f70027, 0x16f70026, 0x16f70025,
2640 0x15f7002d, 0x15f7002b, 0x15f7002a, 0x15f70029,
2641 0x15f70028, 0x15f70027, 0x15f70026, 0x15f70025,
2642 0x14f7002d, 0x14f7002b, 0x14f7002a, 0x14f70029,
2643 0x14f70028, 0x14f70027, 0x14f70026, 0x14f70025,
2644 0x13f7002d, 0x13f7002b, 0x13f7002a, 0x13f70029,
2645 0x13f70028, 0x13f70027, 0x13f70026, 0x13f70025,
2646 0x12f7002d, 0x12f7002b, 0x12f7002a, 0x12f70029,
2647 0x12f70028, 0x12f70027, 0x12f70026, 0x12f70025,
2648 0x11f7002d, 0x11f7002b, 0x11f7002a, 0x11f70029,
2649 0x11f70028, 0x11f70027, 0x11f70026, 0x11f70025,
2650 0x10f7002d, 0x10f7002b, 0x10f7002a, 0x10f70029,
2651 0x10f70028, 0x10f70027, 0x10f70026, 0x10f70025,
2652};
2653
2654const u32 txpwrctrl_tx_gain_ipa_rev6[] = {
2655 0x0ff7002d, 0x0ff7002b, 0x0ff7002a, 0x0ff70029,
2656 0x0ff70028, 0x0ff70027, 0x0ff70026, 0x0ff70025,
2657 0x0ef7002d, 0x0ef7002b, 0x0ef7002a, 0x0ef70029,
2658 0x0ef70028, 0x0ef70027, 0x0ef70026, 0x0ef70025,
2659 0x0df7002d, 0x0df7002b, 0x0df7002a, 0x0df70029,
2660 0x0df70028, 0x0df70027, 0x0df70026, 0x0df70025,
2661 0x0cf7002d, 0x0cf7002b, 0x0cf7002a, 0x0cf70029,
2662 0x0cf70028, 0x0cf70027, 0x0cf70026, 0x0cf70025,
2663 0x0bf7002d, 0x0bf7002b, 0x0bf7002a, 0x0bf70029,
2664 0x0bf70028, 0x0bf70027, 0x0bf70026, 0x0bf70025,
2665 0x0af7002d, 0x0af7002b, 0x0af7002a, 0x0af70029,
2666 0x0af70028, 0x0af70027, 0x0af70026, 0x0af70025,
2667 0x09f7002d, 0x09f7002b, 0x09f7002a, 0x09f70029,
2668 0x09f70028, 0x09f70027, 0x09f70026, 0x09f70025,
2669 0x08f7002d, 0x08f7002b, 0x08f7002a, 0x08f70029,
2670 0x08f70028, 0x08f70027, 0x08f70026, 0x08f70025,
2671 0x07f7002d, 0x07f7002b, 0x07f7002a, 0x07f70029,
2672 0x07f70028, 0x07f70027, 0x07f70026, 0x07f70025,
2673 0x06f7002d, 0x06f7002b, 0x06f7002a, 0x06f70029,
2674 0x06f70028, 0x06f70027, 0x06f70026, 0x06f70025,
2675 0x05f7002d, 0x05f7002b, 0x05f7002a, 0x05f70029,
2676 0x05f70028, 0x05f70027, 0x05f70026, 0x05f70025,
2677 0x04f7002d, 0x04f7002b, 0x04f7002a, 0x04f70029,
2678 0x04f70028, 0x04f70027, 0x04f70026, 0x04f70025,
2679 0x03f7002d, 0x03f7002b, 0x03f7002a, 0x03f70029,
2680 0x03f70028, 0x03f70027, 0x03f70026, 0x03f70025,
2681 0x02f7002d, 0x02f7002b, 0x02f7002a, 0x02f70029,
2682 0x02f70028, 0x02f70027, 0x02f70026, 0x02f70025,
2683 0x01f7002d, 0x01f7002b, 0x01f7002a, 0x01f70029,
2684 0x01f70028, 0x01f70027, 0x01f70026, 0x01f70025,
2685 0x00f7002d, 0x00f7002b, 0x00f7002a, 0x00f70029,
2686 0x00f70028, 0x00f70027, 0x00f70026, 0x00f70025,
2687};
2688
2689const u32 txpwrctrl_tx_gain_ipa_5g[] = {
2690 0x7ff70035, 0x7ff70033, 0x7ff70032, 0x7ff70031,
2691 0x7ff7002f, 0x7ff7002e, 0x7ff7002d, 0x7ff7002b,
2692 0x7ff7002a, 0x7ff70029, 0x7ff70028, 0x7ff70027,
2693 0x7ff70026, 0x7ff70024, 0x7ff70023, 0x7ff70022,
2694 0x7ef70028, 0x7ef70027, 0x7ef70026, 0x7ef70025,
2695 0x7ef70024, 0x7ef70023, 0x7df70028, 0x7df70027,
2696 0x7df70026, 0x7df70025, 0x7df70024, 0x7df70023,
2697 0x7df70022, 0x7cf70029, 0x7cf70028, 0x7cf70027,
2698 0x7cf70026, 0x7cf70025, 0x7cf70023, 0x7cf70022,
2699 0x7bf70029, 0x7bf70028, 0x7bf70026, 0x7bf70025,
2700 0x7bf70024, 0x7bf70023, 0x7bf70022, 0x7bf70021,
2701 0x7af70029, 0x7af70028, 0x7af70027, 0x7af70026,
2702 0x7af70025, 0x7af70024, 0x7af70023, 0x7af70022,
2703 0x79f70029, 0x79f70028, 0x79f70027, 0x79f70026,
2704 0x79f70025, 0x79f70024, 0x79f70023, 0x79f70022,
2705 0x78f70029, 0x78f70028, 0x78f70027, 0x78f70026,
2706 0x78f70025, 0x78f70024, 0x78f70023, 0x78f70022,
2707 0x77f70029, 0x77f70028, 0x77f70027, 0x77f70026,
2708 0x77f70025, 0x77f70024, 0x77f70023, 0x77f70022,
2709 0x76f70029, 0x76f70028, 0x76f70027, 0x76f70026,
2710 0x76f70024, 0x76f70023, 0x76f70022, 0x76f70021,
2711 0x75f70029, 0x75f70028, 0x75f70027, 0x75f70026,
2712 0x75f70025, 0x75f70024, 0x75f70023, 0x74f70029,
2713 0x74f70028, 0x74f70026, 0x74f70025, 0x74f70024,
2714 0x74f70023, 0x74f70022, 0x73f70029, 0x73f70027,
2715 0x73f70026, 0x73f70025, 0x73f70024, 0x73f70023,
2716 0x73f70022, 0x72f70028, 0x72f70027, 0x72f70026,
2717 0x72f70025, 0x72f70024, 0x72f70023, 0x72f70022,
2718 0x71f70028, 0x71f70027, 0x71f70026, 0x71f70025,
2719 0x71f70024, 0x71f70023, 0x70f70028, 0x70f70027,
2720 0x70f70026, 0x70f70024, 0x70f70023, 0x70f70022,
2721 0x70f70021, 0x70f70020, 0x70f70020, 0x70f7001f,
2722};
2723
/*
 * Gain parameters for TX I/Q calibration: [band][step][param], two outer
 * tables (presumably 2.4 GHz vs 5 GHz — confirm) of nine steps each.
 * The first value of each row is a packed code whose nibbles mirror
 * columns 2-4 for most rows (e.g. 0x743 -> 7, 4, 3); the remaining four
 * values are opaque calibration parameters from the Broadcom specs.
 */
2724const u16 tbl_iqcal_gainparams[2][9][8] = {
2725 {
2726 { 0x000, 0, 0, 2, 0x69, 0x69, 0x69, 0x69 },
2727 { 0x700, 7, 0, 0, 0x69, 0x69, 0x69, 0x69 },
2728 { 0x710, 7, 1, 0, 0x68, 0x68, 0x68, 0x68 },
2729 { 0x720, 7, 2, 0, 0x67, 0x67, 0x67, 0x67 },
2730 { 0x730, 7, 3, 0, 0x66, 0x66, 0x66, 0x66 },
2731 { 0x740, 7, 4, 0, 0x65, 0x65, 0x65, 0x65 },
2732 { 0x741, 7, 4, 1, 0x65, 0x65, 0x65, 0x65 },
2733 { 0x742, 7, 4, 2, 0x65, 0x65, 0x65, 0x65 },
2734 { 0x743, 7, 4, 3, 0x65, 0x65, 0x65, 0x65 }
2735 },
2736 {
2737 { 0x000, 7, 0, 0, 0x79, 0x79, 0x79, 0x79 },
2738 { 0x700, 7, 0, 0, 0x79, 0x79, 0x79, 0x79 },
2739 { 0x710, 7, 1, 0, 0x79, 0x79, 0x79, 0x79 },
2740 { 0x720, 7, 2, 0, 0x78, 0x78, 0x78, 0x78 },
2741 { 0x730, 7, 3, 0, 0x78, 0x78, 0x78, 0x78 },
2742 { 0x740, 7, 4, 0, 0x78, 0x78, 0x78, 0x78 },
2743 { 0x741, 7, 4, 1, 0x78, 0x78, 0x78, 0x78 },
2744 { 0x742, 7, 4, 2, 0x78, 0x78, 0x78, 0x78 },
2745 { 0x743, 7, 4, 3, 0x78, 0x78, 0x78, 0x78 }
2746 }
2747};
2748
/*
 * TX I/Q/LO calibration ladder for the LO (carrier-leakage) stage:
 * 18 { percent, g_env } steps (see struct nphy_txiqcal_ladder) ramping
 * the amplitude from 3% to 100% while stepping the gain-envelope index
 * 0..7 in between.
 */
2749const struct nphy_txiqcal_ladder ladder_lo[] = {
2750 { 3, 0 },
2751 { 4, 0 },
2752 { 6, 0 },
2753 { 9, 0 },
2754 { 13, 0 },
2755 { 18, 0 },
2756 { 25, 0 },
2757 { 25, 1 },
2758 { 25, 2 },
2759 { 25, 3 },
2760 { 25, 4 },
2761 { 25, 5 },
2762 { 25, 6 },
2763 { 25, 7 },
2764 { 35, 7 },
2765 { 50, 7 },
2766 { 71, 7 },
2767 { 100, 7 }
2768};
2769
/*
 * TX I/Q/LO calibration ladder for the I/Q-imbalance stage: 18
 * { percent, g_env } steps.  Unlike ladder_lo, the amplitude ramps all
 * the way to 100% at gain-envelope 0 before the envelope index is
 * stepped up to 7.
 */
2770const struct nphy_txiqcal_ladder ladder_iq[] = {
2771 { 3, 0 },
2772 { 4, 0 },
2773 { 6, 0 },
2774 { 9, 0 },
2775 { 13, 0 },
2776 { 18, 0 },
2777 { 25, 0 },
2778 { 35, 0 },
2779 { 50, 0 },
2780 { 71, 0 },
2781 { 100, 0 },
2782 { 100, 1 },
2783 { 100, 2 },
2784 { 100, 3 },
2785 { 100, 4 },
2786 { 100, 5 },
2787 { 100, 6 },
2788 { 100, 7 }
2789};
2790
/*
 * Local-oscillator scale factors: 128 u16 entries in the range
 * 256..1019 (presumably fixed-point with 256 representing unity gain —
 * confirm against the Broadcom specs).  Opaque hardware values.
 */
2791const u16 loscale[] = {
2792 256, 256, 271, 271,
2793 287, 256, 256, 271,
2794 271, 287, 287, 304,
2795 304, 256, 256, 271,
2796 271, 287, 287, 304,
2797 304, 322, 322, 341,
2798 341, 362, 362, 383,
2799 383, 256, 256, 271,
2800 271, 287, 287, 304,
2801 304, 322, 322, 256,
2802 256, 271, 271, 287,
2803 287, 304, 304, 322,
2804 322, 341, 341, 362,
2805 362, 256, 256, 271,
2806 271, 287, 287, 304,
2807 304, 322, 322, 256,
2808 256, 271, 271, 287,
2809 287, 304, 304, 322,
2810 322, 341, 341, 362,
2811 362, 256, 256, 271,
2812 271, 287, 287, 304,
2813 304, 322, 322, 341,
2814 341, 362, 362, 383,
2815 383, 406, 406, 430,
2816 430, 455, 455, 482,
2817 482, 511, 511, 541,
2818 541, 573, 573, 607,
2819 607, 643, 643, 681,
2820 681, 722, 722, 764,
2821 764, 810, 810, 858,
2822 858, 908, 908, 962,
2823 962, 1019, 1019, 256
2824};
2825
/*
 * TX IQ/LO calibration LOFT ladder for 40 MHz channels: 18 entries
 * (matches B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_40_SIZE).  Each entry
 * appears to pack an amplitude in the high byte and a gain-envelope
 * code in the low byte (compare ladder_lo above) — confirm.
 */
2826const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
2827 0x0200, 0x0300, 0x0400, 0x0700,
2828 0x0900, 0x0c00, 0x1200, 0x1201,
2829 0x1202, 0x1203, 0x1204, 0x1205,
2830 0x1206, 0x1207, 0x1907, 0x2307,
2831 0x3207, 0x4707
2832};
2833
/*
 * TX IQ/LO calibration LOFT ladder for 20 MHz channels: 18 entries
 * (matches B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_20_SIZE); same packed
 * amplitude/gain-envelope format as the 40 MHz ladder above.
 */
2834const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
2835 0x0300, 0x0500, 0x0700, 0x0900,
2836 0x0d00, 0x1100, 0x1900, 0x1901,
2837 0x1902, 0x1903, 0x1904, 0x1905,
2838 0x1906, 0x1907, 0x2407, 0x3207,
2839 0x4607, 0x6407
2840};
2841
/*
 * TX IQ/LO calibration I/Q-imbalance ladder for 40 MHz channels:
 * 18 entries (matches B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_40_SIZE).
 * The amplitude (high byte) ramps fully before the low-byte envelope
 * code is stepped, mirroring ladder_iq above.
 */
2842const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
2843 0x0100, 0x0200, 0x0400, 0x0700,
2844 0x0900, 0x0c00, 0x1200, 0x1900,
2845 0x2300, 0x3200, 0x4700, 0x4701,
2846 0x4702, 0x4703, 0x4704, 0x4705,
2847 0x4706, 0x4707
2848};
2849
/*
 * TX IQ/LO calibration I/Q-imbalance ladder for 20 MHz channels:
 * 18 entries (matches B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_20_SIZE);
 * same format as the 40 MHz variant above.
 */
2850const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
2851 0x0200, 0x0300, 0x0600, 0x0900,
2852 0x0d00, 0x1100, 0x1900, 0x2400,
2853 0x3200, 0x4600, 0x6400, 0x6401,
2854 0x6402, 0x6403, 0x6404, 0x6405,
2855 0x6406, 0x6407
2856};
2857
/* All-zero start coefficients for TX IQ/LO calibration on PHY rev >= 3
 * (11 entries, per B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3). */
2858const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3] = { };
2859
/* All-zero start coefficients for TX IQ/LO calibration on PHY rev < 3
 * (9 entries, per B43_NTAB_TX_IQLO_CAL_STARTCOEFS). */
2860const u16 tbl_tx_iqlo_cal_startcoefs[B43_NTAB_TX_IQLO_CAL_STARTCOEFS] = { };
2861
/* Command words for a TX IQ/LO *re*-calibration on PHY rev >= 3:
 * 12 entries; each 0x8xxx word has a matching 0x9xxx word with the same
 * low bits (presumably one sequence per RF core — confirm). */
2862const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
2863 0x8423, 0x8323, 0x8073, 0x8256,
2864 0x8045, 0x8223, 0x9423, 0x9323,
2865 0x9073, 0x9256, 0x9045, 0x9223
2866};
2867
/* Command words for a TX IQ/LO *re*-calibration on PHY rev < 3:
 * 10 entries, same paired 0x8xxx/0x9xxx layout as the rev3 variant. */
2868const u16 tbl_tx_iqlo_cal_cmds_recal[] = {
2869 0x8101, 0x8253, 0x8053, 0x8234,
2870 0x8034, 0x9101, 0x9253, 0x9053,
2871 0x9234, 0x9034
2872};
2873
/* Command words for a *full* TX IQ/LO calibration on PHY rev < 3:
 * 10 entries, paired 0x8xxx/0x9xxx layout. */
2874const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
2875 0x8123, 0x8264, 0x8086, 0x8245,
2876 0x8056, 0x9123, 0x9264, 0x9086,
2877 0x9245, 0x9056
2878};
2879
/* Command words for a *full* TX IQ/LO calibration on PHY rev >= 3:
 * 12 entries, paired 0x8xxx/0x9xxx layout. */
2880const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
2881 0x8434, 0x8334, 0x8084, 0x8267,
2882 0x8056, 0x8234, 0x9434, 0x9334,
2883 0x9084, 0x9267, 0x9056, 0x9234
2884};
2885
/*
 * TX filter coefficient sets for PHY rev 4: seven sets of fifteen
 * signed coefficients.
 * NOTE(review): the last row is written in hex, and its values are
 * exactly the 12-bit two's-complement encodings of row index 3
 * (e.g. 0xed9 -> -295, 0xd10 -> -752), i.e. NOT sign-extended to s16
 * like the other rows.  Confirm whether the consumer expects raw
 * 12-bit register codes for this set.
 */
2886const s16 tbl_tx_filter_coef_rev4[7][15] = {
2887 { -377, 137, -407, 208, -1527,
2888 956, 93, 186, 93, 230,
2889 -44, 230, 20, -191, 201 },
2890 { -77, 20, -98, 49, -93,
2891 60, 56, 111, 56, 26,
2892 -5, 26, 34, -32, 34 },
2893 { -360, 164, -376, 164, -1533,
2894 576, 308, -314, 308, 121,
2895 -73, 121, 91, 124, 91 },
2896 { -295, 200, -363, 142, -1391,
2897 826, 151, 301, 151, 151,
2898 301, 151, 602, -752, 602 },
2899 { -92, 58, -96, 49, -104,
2900 44, 17, 35, 17, 12,
2901 25, 12, 13, 27, 13 },
2902 { -375, 136, -399, 209, -1479,
2903 949, 130, 260, 130, 230,
2904 -44, 230, 201, -191, 201 },
2905 { 0xed9, 0xc8, 0xe95, 0x8e, 0xa91,
2906 0x33a, 0x97, 0x12d, 0x97, 0x97,
2907 0x12d, 0x97, 0x25a, 0xd10, 0x25a }
2908};
2909
2910/* addr0, addr1, bmask, shift */
/*
 * RF control override registers for N-PHY revision 2.  One row per
 * override field bit; the row is selected by fls(field), as the per-row
 * comments note.  addr0/addr1 are the PHY register addresses for the
 * two RF cores, bmask selects the bits within that register, and shift
 * aligns the override value into bmask.
 */
2911const struct nphy_rf_control_override_rev2 tbl_rf_control_override_rev2[] = {
2912 { 0x78, 0x78, 0x0038, 3 }, /* for field == 0x0002 (fls == 2) */
2913 { 0x7A, 0x7D, 0x0001, 0 }, /* for field == 0x0004 (fls == 3) */
2914 { 0x7A, 0x7D, 0x0002, 1 }, /* for field == 0x0008 (fls == 4) */
2915 { 0x7A, 0x7D, 0x0004, 2 }, /* for field == 0x0010 (fls == 5) */
2916 { 0x7A, 0x7D, 0x0030, 4 }, /* for field == 0x0020 (fls == 6) */
2917 { 0x7A, 0x7D, 0x00C0, 6 }, /* for field == 0x0040 (fls == 7) */
2918 { 0x7A, 0x7D, 0x0100, 8 }, /* for field == 0x0080 (fls == 8) */
2919 { 0x7A, 0x7D, 0x0200, 9 }, /* for field == 0x0100 (fls == 9) */
2920 { 0x78, 0x78, 0x0004, 2 }, /* for field == 0x0200 (fls == 10) */
2921 { 0x7B, 0x7E, 0x01FF, 0 }, /* for field == 0x0400 (fls == 11) */
2922 { 0x7C, 0x7F, 0x01FF, 0 }, /* for field == 0x0800 (fls == 12) */
2923 { 0x78, 0x78, 0x0100, 8 }, /* for field == 0x1000 (fls == 13) */
2924 { 0x78, 0x78, 0x0200, 9 }, /* for field == 0x2000 (fls == 14) */
2925 { 0x78, 0x78, 0xF000, 12 } /* for field == 0x4000 (fls == 15) */
2926};
2927
2928/* val_mask, val_shift, en_addr0, val_addr0, en_addr1, val_addr1 */
/*
 * RF control override registers for N-PHY revision 3+.  One row per
 * override field bit (selected by fls(field)); rev3 splits each core's
 * override into an enable register (en_addr) and a value register
 * (val_addr), with val_mask/val_shift placing the value.
 * NOTE(review): the fls==5 row uses val_mask 0x0016, which is not a
 * contiguous mask and does not line up with val_shift 4 (0x0010 would)
 * — looks like a typo, confirm against the Broadcom specs.
 * NOTE(review): the fls==8 row uses val_shift 6 for mask 0x0080, the
 * same shift as the fls==7/0x0040 row; shift 7 would be expected —
 * confirm.
 */
2929const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = {
2930 { 0x8000, 15, 0xE5, 0xF9, 0xE6, 0xFB }, /* field == 0x0001 (fls 1) */
2931 { 0x0001, 0, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0002 (fls 2) */
2932 { 0x0002, 1, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0004 (fls 3) */
2933 { 0x0004, 2, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0008 (fls 4) */
2934 { 0x0016, 4, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0010 (fls 5) */
2935 { 0x0020, 5, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0020 (fls 6) */
2936 { 0x0040, 6, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0040 (fls 7) */
2937 { 0x0080, 6, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0080 (fls 8) */
2938 { 0x0100, 7, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0100 (fls 9) */
2939 { 0x0007, 0, 0xE7, 0xF8, 0xEC, 0xFA }, /* field == 0x0200 (fls 10) */
2940 { 0x0070, 4, 0xE7, 0xF8, 0xEC, 0xFA }, /* field == 0x0400 (fls 11) */
2941 { 0xE000, 13, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0800 (fls 12) */
2942 { 0xFFFF, 0, 0xE7, 0x7B, 0xEC, 0x7E }, /* field == 0x1000 (fls 13) */
2943 { 0xFFFF, 0, 0xE7, 0x7C, 0xEC, 0x7F }, /* field == 0x2000 (fls 14) */
2944 { 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */
2945};
2946
2409static inline void assert_ntab_array_sizes(void) 2947static inline void assert_ntab_array_sizes(void)
2410{ 2948{
2411#undef check 2949#undef check
@@ -2442,6 +2980,72 @@ static inline void assert_ntab_array_sizes(void)
2442#undef check 2980#undef check
2443} 2981}
2444 2982
2983u32 b43_ntab_read(struct b43_wldev *dev, u32 offset)
2984{
2985 u32 type, value;
2986
2987 type = offset & B43_NTAB_TYPEMASK;
2988 offset &= ~B43_NTAB_TYPEMASK;
2989 B43_WARN_ON(offset > 0xFFFF);
2990
2991 switch (type) {
2992 case B43_NTAB_8BIT:
2993 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
2994 value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF;
2995 break;
2996 case B43_NTAB_16BIT:
2997 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
2998 value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
2999 break;
3000 case B43_NTAB_32BIT:
3001 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
3002 value = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
3003 value <<= 16;
3004 value |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
3005 break;
3006 default:
3007 B43_WARN_ON(1);
3008 value = 0;
3009 }
3010
3011 return value;
3012}
3013
3014void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
3015 unsigned int nr_elements, void *_data)
3016{
3017 u32 type;
3018 u8 *data = _data;
3019 unsigned int i;
3020
3021 type = offset & B43_NTAB_TYPEMASK;
3022 offset &= ~B43_NTAB_TYPEMASK;
3023 B43_WARN_ON(offset > 0xFFFF);
3024
3025 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
3026
3027 for (i = 0; i < nr_elements; i++) {
3028 switch (type) {
3029 case B43_NTAB_8BIT:
3030 *data = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF;
3031 data++;
3032 break;
3033 case B43_NTAB_16BIT:
3034 *((u16 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
3035 data += 2;
3036 break;
3037 case B43_NTAB_32BIT:
3038 *((u32 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
3039 *((u32 *)data) <<= 16;
3040 *((u32 *)data) |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
3041 data += 4;
3042 break;
3043 default:
3044 B43_WARN_ON(1);
3045 }
3046 }
3047}
3048
2445void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value) 3049void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value)
2446{ 3050{
2447 u32 type; 3051 u32 type;
@@ -2474,3 +3078,91 @@ void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value)
2474 /* Some compiletime assertions... */ 3078 /* Some compiletime assertions... */
2475 assert_ntab_array_sizes(); 3079 assert_ntab_array_sizes();
2476} 3080}
3081
3082void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
3083 unsigned int nr_elements, const void *_data)
3084{
3085 u32 type, value;
3086 const u8 *data = _data;
3087 unsigned int i;
3088
3089 type = offset & B43_NTAB_TYPEMASK;
3090 offset &= ~B43_NTAB_TYPEMASK;
3091 B43_WARN_ON(offset > 0xFFFF);
3092
3093 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
3094
3095 for (i = 0; i < nr_elements; i++) {
3096 switch (type) {
3097 case B43_NTAB_8BIT:
3098 value = *data;
3099 data++;
3100 B43_WARN_ON(value & ~0xFF);
3101 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, value);
3102 break;
3103 case B43_NTAB_16BIT:
3104 value = *((u16 *)data);
3105 data += 2;
3106 B43_WARN_ON(value & ~0xFFFF);
3107 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, value);
3108 break;
3109 case B43_NTAB_32BIT:
3110 value = *((u32 *)data);
3111 data += 4;
3112 b43_phy_write(dev, B43_NPHY_TABLE_DATAHI, value >> 16);
3113 b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
3114 value & 0xFFFF);
3115 break;
3116 default:
3117 B43_WARN_ON(1);
3118 }
3119 }
3120}
3121
/*
 * Upload a complete table into N-PHY table memory, one element at a
 * time via b43_ntab_write().  "offset" must be spelled as a B43_NTAB_*
 * macro name, because the element count is formed by token-pasting
 * offset##_SIZE.
 */
3122#define ntab_upload(dev, offset, data) do { \
3123 unsigned int i; \
3124 for (i = 0; i < (offset##_SIZE); i++) \
3125 b43_ntab_write(dev, (offset) + i, (data)[i]); \
3126 } while (0)
3127
/*
 * Upload all N-PHY tables required by PHY revisions 0, 1 and 2.
 * "Static" tables hold fixed hardware data; the "volatile" group is
 * presumably rewritten at runtime (e.g. by power/gain control) but
 * still needs initial values — confirm against the callers.
 */
3128void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
3129{
3130 /* Static tables */
3131 ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct);
3132 ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup);
3133 ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap);
3134 ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
3135 ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
3136 ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
3137 ntab_upload(dev, B43_NTAB_PILOTLT, b43_ntab_pilotlt);
3138 ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
3139 ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
3140 ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
3141 ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
3142 ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
3143 ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
3144 ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
3145
3146 /* Volatile tables */
3147 ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
3148 ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
3149 ntab_upload(dev, B43_NTAB_C0_ESTPLT, b43_ntab_estimatepowerlt0);
3150 ntab_upload(dev, B43_NTAB_C1_ESTPLT, b43_ntab_estimatepowerlt1);
3151 ntab_upload(dev, B43_NTAB_C0_ADJPLT, b43_ntab_adjustpower0);
3152 ntab_upload(dev, B43_NTAB_C1_ADJPLT, b43_ntab_adjustpower1);
3153 ntab_upload(dev, B43_NTAB_C0_GAINCTL, b43_ntab_gainctl0);
3154 ntab_upload(dev, B43_NTAB_C1_GAINCTL, b43_ntab_gainctl1);
3155 ntab_upload(dev, B43_NTAB_C0_IQLT, b43_ntab_iqlt0);
3156 ntab_upload(dev, B43_NTAB_C1_IQLT, b43_ntab_iqlt1);
3157 ntab_upload(dev, B43_NTAB_C0_LOFEEDTH, b43_ntab_loftlt0);
3158 ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
3159}
3160
/*
 * Table upload for PHY revision >= 3.  Intentionally a stub: the rev3+
 * static and volatile table sets are not implemented yet.
 */
3161void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
3162{
3163 /* Static tables */
3164 /* TODO */
3165
3166 /* Volatile tables */
3167 /* TODO */
3168}
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 4d498b053ec7..9c1c6ecd3672 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -46,6 +46,27 @@ struct b43_nphy_channeltab_entry {
46 46
47struct b43_wldev; 47struct b43_wldev;
48 48
/* One amplitude step of a TX I/Q/LO calibration ladder: target
 * amplitude in percent plus a gain-envelope index (see ladder_lo /
 * ladder_iq in tables_nphy.c). */
49struct nphy_txiqcal_ladder {
50 u8 percent;
51 u8 g_env;
52};
53
/* RF control override descriptor for N-PHY rev 2: per-core PHY register
 * addresses (addr0/addr1), the bit mask within the register, and the
 * shift that aligns the override value into the mask
 * (see tbl_rf_control_override_rev2). */
54struct nphy_rf_control_override_rev2 {
55 u8 addr0;
56 u8 addr1;
57 u16 bmask;
58 u8 shift;
59};
60
/* RF control override descriptor for N-PHY rev 3+: the value's mask and
 * shift, plus separate enable/value register addresses for each of the
 * two RF cores (see tbl_rf_control_override_rev3). */
61struct nphy_rf_control_override_rev3 {
62 u16 val_mask;
63 u8 val_shift;
64 u8 en_addr0;
65 u8 val_addr0;
66 u8 en_addr1;
67 u8 val_addr1;
68};
69
49/* Upload the default register value table. 70/* Upload the default register value table.
50 * If "ghz5" is true, we upload the 5Ghz table. Otherwise the 2.4Ghz 71 * If "ghz5" is true, we upload the 5Ghz table. Otherwise the 2.4Ghz
51 * table is uploaded. If "ignore_uploadflag" is true, we upload any value 72 * table is uploaded. If "ignore_uploadflag" is true, we upload any value
@@ -126,34 +147,57 @@ b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel);
126#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */ 147#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
127#define B43_NTAB_C1_LOFEEDTH_SIZE 128 148#define B43_NTAB_C1_LOFEEDTH_SIZE 128
128 149
150#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_40_SIZE 18
151#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_20_SIZE 18
152#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_40_SIZE 18
153#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_20_SIZE 18
154#define B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3 11
155#define B43_NTAB_TX_IQLO_CAL_STARTCOEFS 9
156#define B43_NTAB_TX_IQLO_CAL_CMDS_RECAL_REV3 12
157#define B43_NTAB_TX_IQLO_CAL_CMDS_RECAL 10
158#define B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL 10
159#define B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL_REV3 12
160
161u32 b43_ntab_read(struct b43_wldev *dev, u32 offset);
162void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
163 unsigned int nr_elements, void *_data);
129void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value); 164void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value);
130 165void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
131extern const u8 b43_ntab_adjustpower0[]; 166 unsigned int nr_elements, const void *_data);
132extern const u8 b43_ntab_adjustpower1[]; 167
133extern const u16 b43_ntab_bdi[]; 168void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev);
134extern const u32 b43_ntab_channelest[]; 169void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev);
135extern const u8 b43_ntab_estimatepowerlt0[]; 170
136extern const u8 b43_ntab_estimatepowerlt1[]; 171extern const u32 b43_ntab_tx_gain_rev0_1_2[];
137extern const u8 b43_ntab_framelookup[]; 172extern const u32 b43_ntab_tx_gain_rev3plus_2ghz[];
138extern const u32 b43_ntab_framestruct[]; 173extern const u32 b43_ntab_tx_gain_rev3_5ghz[];
139extern const u32 b43_ntab_gainctl0[]; 174extern const u32 b43_ntab_tx_gain_rev4_5ghz[];
140extern const u32 b43_ntab_gainctl1[]; 175extern const u32 b43_ntab_tx_gain_rev5plus_5ghz[];
141extern const u32 b43_ntab_intlevel[]; 176
142extern const u32 b43_ntab_iqlt0[]; 177extern const u32 txpwrctrl_tx_gain_ipa[];
143extern const u32 b43_ntab_iqlt1[]; 178extern const u32 txpwrctrl_tx_gain_ipa_rev5[];
144extern const u16 b43_ntab_loftlt0[]; 179extern const u32 txpwrctrl_tx_gain_ipa_rev6[];
145extern const u16 b43_ntab_loftlt1[]; 180extern const u32 txpwrctrl_tx_gain_ipa_5g[];
146extern const u8 b43_ntab_mcs[]; 181extern const u16 tbl_iqcal_gainparams[2][9][8];
147extern const u32 b43_ntab_noisevar10[]; 182extern const struct nphy_txiqcal_ladder ladder_lo[];
148extern const u32 b43_ntab_noisevar11[]; 183extern const struct nphy_txiqcal_ladder ladder_iq[];
149extern const u16 b43_ntab_pilot[]; 184extern const u16 loscale[];
150extern const u32 b43_ntab_pilotlt[]; 185
151extern const u32 b43_ntab_tdi20a0[]; 186extern const u16 tbl_tx_iqlo_cal_loft_ladder_40[];
152extern const u32 b43_ntab_tdi20a1[]; 187extern const u16 tbl_tx_iqlo_cal_loft_ladder_20[];
153extern const u32 b43_ntab_tdi40a0[]; 188extern const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[];
154extern const u32 b43_ntab_tdi40a1[]; 189extern const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[];
155extern const u32 b43_ntab_tdtrn[]; 190extern const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[];
156extern const u32 b43_ntab_tmap[]; 191extern const u16 tbl_tx_iqlo_cal_startcoefs[];
157 192extern const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[];
193extern const u16 tbl_tx_iqlo_cal_cmds_recal[];
194extern const u16 tbl_tx_iqlo_cal_cmds_fullcal[];
195extern const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[];
196extern const s16 tbl_tx_filter_coef_rev4[7][15];
197
198extern const struct nphy_rf_control_override_rev2
199 tbl_rf_control_override_rev2[];
200extern const struct nphy_rf_control_override_rev3
201 tbl_rf_control_override_rev3[];
158 202
159#endif /* B43_TABLES_NPHY_H_ */ 203#endif /* B43_TABLES_NPHY_H_ */
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 0a86bdf53154..8b9387c6ff36 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -1411,7 +1411,6 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
1411 b43legacyerr(dev->wl, "DMA tx mapping failure\n"); 1411 b43legacyerr(dev->wl, "DMA tx mapping failure\n");
1412 goto out_unlock; 1412 goto out_unlock;
1413 } 1413 }
1414 ring->nr_tx_packets++;
1415 if ((free_slots(ring) < SLOTS_PER_PACKET) || 1414 if ((free_slots(ring) < SLOTS_PER_PACKET) ||
1416 should_inject_overflow(ring)) { 1415 should_inject_overflow(ring)) {
1417 /* This TX ring is full. */ 1416 /* This TX ring is full. */
@@ -1527,25 +1526,6 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
1527 spin_unlock(&ring->lock); 1526 spin_unlock(&ring->lock);
1528} 1527}
1529 1528
1530void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
1531 struct ieee80211_tx_queue_stats *stats)
1532{
1533 const int nr_queues = dev->wl->hw->queues;
1534 struct b43legacy_dmaring *ring;
1535 unsigned long flags;
1536 int i;
1537
1538 for (i = 0; i < nr_queues; i++) {
1539 ring = priority_to_txring(dev, i);
1540
1541 spin_lock_irqsave(&ring->lock, flags);
1542 stats[i].len = ring->used_slots / SLOTS_PER_PACKET;
1543 stats[i].limit = ring->nr_slots / SLOTS_PER_PACKET;
1544 stats[i].count = ring->nr_tx_packets;
1545 spin_unlock_irqrestore(&ring->lock, flags);
1546 }
1547}
1548
1549static void dma_rx(struct b43legacy_dmaring *ring, 1529static void dma_rx(struct b43legacy_dmaring *ring,
1550 int *slot) 1530 int *slot)
1551{ 1531{
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h
index 2f186003c31e..f9681041c2d8 100644
--- a/drivers/net/wireless/b43legacy/dma.h
+++ b/drivers/net/wireless/b43legacy/dma.h
@@ -243,8 +243,6 @@ struct b43legacy_dmaring {
243 int used_slots; 243 int used_slots;
244 /* Currently used slot in the ring. */ 244 /* Currently used slot in the ring. */
245 int current_slot; 245 int current_slot;
246 /* Total number of packets sent. Statistics only. */
247 unsigned int nr_tx_packets;
248 /* Frameoffset in octets. */ 246 /* Frameoffset in octets. */
249 u32 frameoffset; 247 u32 frameoffset;
250 /* Descriptor buffer size. */ 248 /* Descriptor buffer size. */
@@ -292,9 +290,6 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev);
292void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev); 290void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev);
293void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev); 291void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev);
294 292
295void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
296 struct ieee80211_tx_queue_stats *stats);
297
298int b43legacy_dma_tx(struct b43legacy_wldev *dev, 293int b43legacy_dma_tx(struct b43legacy_wldev *dev,
299 struct sk_buff *skb); 294 struct sk_buff *skb);
300void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev, 295void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
@@ -315,11 +310,6 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev)
315{ 310{
316} 311}
317static inline 312static inline
318void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
319 struct ieee80211_tx_queue_stats *stats)
320{
321}
322static inline
323int b43legacy_dma_tx(struct b43legacy_wldev *dev, 313int b43legacy_dma_tx(struct b43legacy_wldev *dev,
324 struct sk_buff *skb) 314 struct sk_buff *skb)
325{ 315{
diff --git a/drivers/net/wireless/b43legacy/leds.h b/drivers/net/wireless/b43legacy/leds.h
index 82167a90088f..9ff6750dc57f 100644
--- a/drivers/net/wireless/b43legacy/leds.h
+++ b/drivers/net/wireless/b43legacy/leds.h
@@ -45,7 +45,7 @@ enum b43legacy_led_behaviour {
45void b43legacy_leds_init(struct b43legacy_wldev *dev); 45void b43legacy_leds_init(struct b43legacy_wldev *dev);
46void b43legacy_leds_exit(struct b43legacy_wldev *dev); 46void b43legacy_leds_exit(struct b43legacy_wldev *dev);
47 47
48#else /* CONFIG_B43EGACY_LEDS */ 48#else /* CONFIG_B43LEGACY_LEDS */
49/* LED support disabled */ 49/* LED support disabled */
50 50
51struct b43legacy_led { 51struct b43legacy_led {
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 4a905b6a886b..1d070be5a678 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -61,6 +61,8 @@ MODULE_AUTHOR("Michael Buesch");
61MODULE_LICENSE("GPL"); 61MODULE_LICENSE("GPL");
62 62
63MODULE_FIRMWARE(B43legacy_SUPPORTED_FIRMWARE_ID); 63MODULE_FIRMWARE(B43legacy_SUPPORTED_FIRMWARE_ID);
64MODULE_FIRMWARE("b43legacy/ucode2.fw");
65MODULE_FIRMWARE("b43legacy/ucode4.fw");
64 66
65#if defined(CONFIG_B43LEGACY_DMA) && defined(CONFIG_B43LEGACY_PIO) 67#if defined(CONFIG_B43LEGACY_DMA) && defined(CONFIG_B43LEGACY_PIO)
66static int modparam_pio; 68static int modparam_pio;
@@ -2444,29 +2446,6 @@ static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
2444 return 0; 2446 return 0;
2445} 2447}
2446 2448
2447static int b43legacy_op_get_tx_stats(struct ieee80211_hw *hw,
2448 struct ieee80211_tx_queue_stats *stats)
2449{
2450 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
2451 struct b43legacy_wldev *dev = wl->current_dev;
2452 unsigned long flags;
2453 int err = -ENODEV;
2454
2455 if (!dev)
2456 goto out;
2457 spin_lock_irqsave(&wl->irq_lock, flags);
2458 if (likely(b43legacy_status(dev) >= B43legacy_STAT_STARTED)) {
2459 if (b43legacy_using_pio(dev))
2460 b43legacy_pio_get_tx_stats(dev, stats);
2461 else
2462 b43legacy_dma_get_tx_stats(dev, stats);
2463 err = 0;
2464 }
2465 spin_unlock_irqrestore(&wl->irq_lock, flags);
2466out:
2467 return err;
2468}
2469
2470static int b43legacy_op_get_stats(struct ieee80211_hw *hw, 2449static int b43legacy_op_get_stats(struct ieee80211_hw *hw,
2471 struct ieee80211_low_level_stats *stats) 2450 struct ieee80211_low_level_stats *stats)
2472{ 2451{
@@ -2921,6 +2900,7 @@ static int b43legacy_wireless_core_start(struct b43legacy_wldev *dev)
2921 goto out; 2900 goto out;
2922 } 2901 }
2923 /* We are ready to run. */ 2902 /* We are ready to run. */
2903 ieee80211_wake_queues(dev->wl->hw);
2924 b43legacy_set_status(dev, B43legacy_STAT_STARTED); 2904 b43legacy_set_status(dev, B43legacy_STAT_STARTED);
2925 2905
2926 /* Start data flow (TX/RX) */ 2906 /* Start data flow (TX/RX) */
@@ -3341,6 +3321,7 @@ static int b43legacy_wireless_core_init(struct b43legacy_wldev *dev)
3341 b43legacy_security_init(dev); 3321 b43legacy_security_init(dev);
3342 b43legacy_rng_init(wl); 3322 b43legacy_rng_init(wl);
3343 3323
3324 ieee80211_wake_queues(dev->wl->hw);
3344 b43legacy_set_status(dev, B43legacy_STAT_INITIALIZED); 3325 b43legacy_set_status(dev, B43legacy_STAT_INITIALIZED);
3345 3326
3346 b43legacy_leds_init(dev); 3327 b43legacy_leds_init(dev);
@@ -3361,7 +3342,7 @@ err_kfree_lo_control:
3361} 3342}
3362 3343
3363static int b43legacy_op_add_interface(struct ieee80211_hw *hw, 3344static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
3364 struct ieee80211_if_init_conf *conf) 3345 struct ieee80211_vif *vif)
3365{ 3346{
3366 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); 3347 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
3367 struct b43legacy_wldev *dev; 3348 struct b43legacy_wldev *dev;
@@ -3370,23 +3351,23 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
3370 3351
3371 /* TODO: allow WDS/AP devices to coexist */ 3352 /* TODO: allow WDS/AP devices to coexist */
3372 3353
3373 if (conf->type != NL80211_IFTYPE_AP && 3354 if (vif->type != NL80211_IFTYPE_AP &&
3374 conf->type != NL80211_IFTYPE_STATION && 3355 vif->type != NL80211_IFTYPE_STATION &&
3375 conf->type != NL80211_IFTYPE_WDS && 3356 vif->type != NL80211_IFTYPE_WDS &&
3376 conf->type != NL80211_IFTYPE_ADHOC) 3357 vif->type != NL80211_IFTYPE_ADHOC)
3377 return -EOPNOTSUPP; 3358 return -EOPNOTSUPP;
3378 3359
3379 mutex_lock(&wl->mutex); 3360 mutex_lock(&wl->mutex);
3380 if (wl->operating) 3361 if (wl->operating)
3381 goto out_mutex_unlock; 3362 goto out_mutex_unlock;
3382 3363
3383 b43legacydbg(wl, "Adding Interface type %d\n", conf->type); 3364 b43legacydbg(wl, "Adding Interface type %d\n", vif->type);
3384 3365
3385 dev = wl->current_dev; 3366 dev = wl->current_dev;
3386 wl->operating = 1; 3367 wl->operating = 1;
3387 wl->vif = conf->vif; 3368 wl->vif = vif;
3388 wl->if_type = conf->type; 3369 wl->if_type = vif->type;
3389 memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN); 3370 memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
3390 3371
3391 spin_lock_irqsave(&wl->irq_lock, flags); 3372 spin_lock_irqsave(&wl->irq_lock, flags);
3392 b43legacy_adjust_opmode(dev); 3373 b43legacy_adjust_opmode(dev);
@@ -3403,18 +3384,18 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
3403} 3384}
3404 3385
3405static void b43legacy_op_remove_interface(struct ieee80211_hw *hw, 3386static void b43legacy_op_remove_interface(struct ieee80211_hw *hw,
3406 struct ieee80211_if_init_conf *conf) 3387 struct ieee80211_vif *vif)
3407{ 3388{
3408 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); 3389 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
3409 struct b43legacy_wldev *dev = wl->current_dev; 3390 struct b43legacy_wldev *dev = wl->current_dev;
3410 unsigned long flags; 3391 unsigned long flags;
3411 3392
3412 b43legacydbg(wl, "Removing Interface type %d\n", conf->type); 3393 b43legacydbg(wl, "Removing Interface type %d\n", vif->type);
3413 3394
3414 mutex_lock(&wl->mutex); 3395 mutex_lock(&wl->mutex);
3415 3396
3416 B43legacy_WARN_ON(!wl->operating); 3397 B43legacy_WARN_ON(!wl->operating);
3417 B43legacy_WARN_ON(wl->vif != conf->vif); 3398 B43legacy_WARN_ON(wl->vif != vif);
3418 wl->vif = NULL; 3399 wl->vif = NULL;
3419 3400
3420 wl->operating = 0; 3401 wl->operating = 0;
@@ -3509,7 +3490,6 @@ static const struct ieee80211_ops b43legacy_hw_ops = {
3509 .bss_info_changed = b43legacy_op_bss_info_changed, 3490 .bss_info_changed = b43legacy_op_bss_info_changed,
3510 .configure_filter = b43legacy_op_configure_filter, 3491 .configure_filter = b43legacy_op_configure_filter,
3511 .get_stats = b43legacy_op_get_stats, 3492 .get_stats = b43legacy_op_get_stats,
3512 .get_tx_stats = b43legacy_op_get_tx_stats,
3513 .start = b43legacy_op_start, 3493 .start = b43legacy_op_start,
3514 .stop = b43legacy_op_stop, 3494 .stop = b43legacy_op_stop,
3515 .set_tim = b43legacy_op_beacon_set_tim, 3495 .set_tim = b43legacy_op_beacon_set_tim,
@@ -3960,7 +3940,7 @@ static struct ssb_driver b43legacy_ssb_driver = {
3960 3940
3961static void b43legacy_print_driverinfo(void) 3941static void b43legacy_print_driverinfo(void)
3962{ 3942{
3963 const char *feat_pci = "", *feat_leds = "", *feat_rfkill = "", 3943 const char *feat_pci = "", *feat_leds = "",
3964 *feat_pio = "", *feat_dma = ""; 3944 *feat_pio = "", *feat_dma = "";
3965 3945
3966#ifdef CONFIG_B43LEGACY_PCI_AUTOSELECT 3946#ifdef CONFIG_B43LEGACY_PCI_AUTOSELECT
@@ -3969,9 +3949,6 @@ static void b43legacy_print_driverinfo(void)
3969#ifdef CONFIG_B43LEGACY_LEDS 3949#ifdef CONFIG_B43LEGACY_LEDS
3970 feat_leds = "L"; 3950 feat_leds = "L";
3971#endif 3951#endif
3972#ifdef CONFIG_B43LEGACY_RFKILL
3973 feat_rfkill = "R";
3974#endif
3975#ifdef CONFIG_B43LEGACY_PIO 3952#ifdef CONFIG_B43LEGACY_PIO
3976 feat_pio = "I"; 3953 feat_pio = "I";
3977#endif 3954#endif
@@ -3979,9 +3956,9 @@ static void b43legacy_print_driverinfo(void)
3979 feat_dma = "D"; 3956 feat_dma = "D";
3980#endif 3957#endif
3981 printk(KERN_INFO "Broadcom 43xx-legacy driver loaded " 3958 printk(KERN_INFO "Broadcom 43xx-legacy driver loaded "
3982 "[ Features: %s%s%s%s%s, Firmware-ID: " 3959 "[ Features: %s%s%s%s, Firmware-ID: "
3983 B43legacy_SUPPORTED_FIRMWARE_ID " ]\n", 3960 B43legacy_SUPPORTED_FIRMWARE_ID " ]\n",
3984 feat_pci, feat_leds, feat_rfkill, feat_pio, feat_dma); 3961 feat_pci, feat_leds, feat_pio, feat_dma);
3985} 3962}
3986 3963
3987static int __init b43legacy_init(void) 3964static int __init b43legacy_init(void)
diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/b43legacy/pio.c
index 51866c9a2769..017c0e9c37ef 100644
--- a/drivers/net/wireless/b43legacy/pio.c
+++ b/drivers/net/wireless/b43legacy/pio.c
@@ -477,7 +477,6 @@ int b43legacy_pio_tx(struct b43legacy_wldev *dev,
477 477
478 list_move_tail(&packet->list, &queue->txqueue); 478 list_move_tail(&packet->list, &queue->txqueue);
479 queue->nr_txfree--; 479 queue->nr_txfree--;
480 queue->nr_tx_packets++;
481 B43legacy_WARN_ON(queue->nr_txfree >= B43legacy_PIO_MAXTXPACKETS); 480 B43legacy_WARN_ON(queue->nr_txfree >= B43legacy_PIO_MAXTXPACKETS);
482 481
483 tasklet_schedule(&queue->txtask); 482 tasklet_schedule(&queue->txtask);
@@ -546,18 +545,6 @@ void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
546 tasklet_schedule(&queue->txtask); 545 tasklet_schedule(&queue->txtask);
547} 546}
548 547
549void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev,
550 struct ieee80211_tx_queue_stats *stats)
551{
552 struct b43legacy_pio *pio = &dev->pio;
553 struct b43legacy_pioqueue *queue;
554
555 queue = pio->queue1;
556 stats[0].len = B43legacy_PIO_MAXTXPACKETS - queue->nr_txfree;
557 stats[0].limit = B43legacy_PIO_MAXTXPACKETS;
558 stats[0].count = queue->nr_tx_packets;
559}
560
561static void pio_rx_error(struct b43legacy_pioqueue *queue, 548static void pio_rx_error(struct b43legacy_pioqueue *queue,
562 int clear_buffers, 549 int clear_buffers,
563 const char *error) 550 const char *error)
diff --git a/drivers/net/wireless/b43legacy/pio.h b/drivers/net/wireless/b43legacy/pio.h
index 464fec05a06d..8e6773ea6e75 100644
--- a/drivers/net/wireless/b43legacy/pio.h
+++ b/drivers/net/wireless/b43legacy/pio.h
@@ -74,10 +74,6 @@ struct b43legacy_pioqueue {
74 * posted to the device. We are waiting for the txstatus. 74 * posted to the device. We are waiting for the txstatus.
75 */ 75 */
76 struct list_head txrunning; 76 struct list_head txrunning;
77 /* Total number or packets sent.
78 * (This counter can obviously wrap).
79 */
80 unsigned int nr_tx_packets;
81 struct tasklet_struct txtask; 77 struct tasklet_struct txtask;
82 struct b43legacy_pio_txpacket 78 struct b43legacy_pio_txpacket
83 tx_packets_cache[B43legacy_PIO_MAXTXPACKETS]; 79 tx_packets_cache[B43legacy_PIO_MAXTXPACKETS];
@@ -106,8 +102,6 @@ int b43legacy_pio_tx(struct b43legacy_wldev *dev,
106 struct sk_buff *skb); 102 struct sk_buff *skb);
107void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev, 103void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
108 const struct b43legacy_txstatus *status); 104 const struct b43legacy_txstatus *status);
109void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev,
110 struct ieee80211_tx_queue_stats *stats);
111void b43legacy_pio_rx(struct b43legacy_pioqueue *queue); 105void b43legacy_pio_rx(struct b43legacy_pioqueue *queue);
112 106
113/* Suspend TX queue in hardware. */ 107/* Suspend TX queue in hardware. */
@@ -140,11 +134,6 @@ void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
140{ 134{
141} 135}
142static inline 136static inline
143void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev,
144 struct ieee80211_tx_queue_stats *stats)
145{
146}
147static inline
148void b43legacy_pio_rx(struct b43legacy_pioqueue *queue) 137void b43legacy_pio_rx(struct b43legacy_pioqueue *queue)
149{ 138{
150} 139}
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index c9640a3e02c9..d19748d90aaf 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -794,13 +794,6 @@ static struct pcmcia_device_id hostap_cs_ids[] = {
794 PCMCIA_MFC_DEVICE_PROD_ID12(0, "SanDisk", "ConnectPlus", 794 PCMCIA_MFC_DEVICE_PROD_ID12(0, "SanDisk", "ConnectPlus",
795 0x7a954bd9, 0x74be00c6), 795 0x7a954bd9, 0x74be00c6),
796 PCMCIA_DEVICE_PROD_ID123( 796 PCMCIA_DEVICE_PROD_ID123(
797 "Intersil", "PRISM 2_5 PCMCIA ADAPTER", "ISL37300P",
798 0x4b801a17, 0x6345a0bf, 0xc9049a39),
799 /* D-Link DWL-650 Rev. P1; manfid 0x000b, 0x7110 */
800 PCMCIA_DEVICE_PROD_ID123(
801 "D-Link", "DWL-650 Wireless PC Card RevP", "ISL37101P-10",
802 0x1a424a1c, 0x6ea57632, 0xdd97a26b),
803 PCMCIA_DEVICE_PROD_ID123(
804 "Addtron", "AWP-100 Wireless PCMCIA", "Version 01.02", 797 "Addtron", "AWP-100 Wireless PCMCIA", "Version 01.02",
805 0xe6ec52ce, 0x08649af2, 0x4b74baa0), 798 0xe6ec52ce, 0x08649af2, 0x4b74baa0),
806 PCMCIA_DEVICE_PROD_ID123( 799 PCMCIA_DEVICE_PROD_ID123(
@@ -834,14 +827,12 @@ static struct pcmcia_device_id hostap_cs_ids[] = {
834 "Ver. 1.00", 827 "Ver. 1.00",
835 0x5cd01705, 0x4271660f, 0x9d08ee12), 828 0x5cd01705, 0x4271660f, 0x9d08ee12),
836 PCMCIA_DEVICE_PROD_ID123( 829 PCMCIA_DEVICE_PROD_ID123(
837 "corega", "WL PCCL-11", "ISL37300P",
838 0xa21501a, 0x59868926, 0xc9049a39),
839 PCMCIA_DEVICE_PROD_ID123(
840 "The Linksys Group, Inc.", "Wireless Network CF Card", "ISL37300P",
841 0xa5f472c2, 0x9c05598d, 0xc9049a39),
842 PCMCIA_DEVICE_PROD_ID123(
843 "Wireless LAN" , "11Mbps PC Card", "Version 01.02", 830 "Wireless LAN" , "11Mbps PC Card", "Version 01.02",
844 0x4b8870ff, 0x70e946d1, 0x4b74baa0), 831 0x4b8870ff, 0x70e946d1, 0x4b74baa0),
832 PCMCIA_DEVICE_PROD_ID3("HFA3863", 0x355cb092),
833 PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2),
834 PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b),
835 PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39),
845 PCMCIA_DEVICE_NULL 836 PCMCIA_DEVICE_NULL
846}; 837};
847MODULE_DEVICE_TABLE(pcmcia, hostap_cs_ids); 838MODULE_DEVICE_TABLE(pcmcia, hostap_cs_ids);
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index ff9b5c882184..d70732819423 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -2618,6 +2618,15 @@ static irqreturn_t prism2_interrupt(int irq, void *dev_id)
2618 int events = 0; 2618 int events = 0;
2619 u16 ev; 2619 u16 ev;
2620 2620
2621 /* Detect early interrupt before driver is fully configued */
2622 if (!dev->base_addr) {
2623 if (net_ratelimit()) {
2624 printk(KERN_DEBUG "%s: Interrupt, but dev not configured\n",
2625 dev->name);
2626 }
2627 return IRQ_HANDLED;
2628 }
2629
2621 iface = netdev_priv(dev); 2630 iface = netdev_priv(dev);
2622 local = iface->local; 2631 local = iface->local;
2623 2632
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index 8fdd41f4b4f2..4d97ae37499b 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -39,7 +39,7 @@ struct hostap_pci_priv {
39/* FIX: do we need mb/wmb/rmb with memory operations? */ 39/* FIX: do we need mb/wmb/rmb with memory operations? */
40 40
41 41
42static struct pci_device_id prism2_pci_id_table[] __devinitdata = { 42static DEFINE_PCI_DEVICE_TABLE(prism2_pci_id_table) = {
43 /* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */ 43 /* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */
44 { 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID }, 44 { 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID },
45 /* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */ 45 /* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index 0e5d51086a44..fc04ccdc5bef 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -60,7 +60,7 @@ struct hostap_plx_priv {
60 60
61#define PLXDEV(vendor,dev,str) { vendor, dev, PCI_ANY_ID, PCI_ANY_ID } 61#define PLXDEV(vendor,dev,str) { vendor, dev, PCI_ANY_ID, PCI_ANY_ID }
62 62
63static struct pci_device_id prism2_plx_id_table[] __devinitdata = { 63static DEFINE_PCI_DEVICE_TABLE(prism2_plx_id_table) = {
64 PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"), 64 PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"),
65 PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"), 65 PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"),
66 PLXDEV(0x126c, 0x8030, "Nortel emobility"), 66 PLXDEV(0x126c, 0x8030, "Nortel emobility"),
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 56afcf041f81..9b72c45a7748 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -6585,7 +6585,7 @@ static void ipw2100_shutdown(struct pci_dev *pci_dev)
6585 6585
6586#define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x } 6586#define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x }
6587 6587
6588static struct pci_device_id ipw2100_pci_id_table[] __devinitdata = { 6588static DEFINE_PCI_DEVICE_TABLE(ipw2100_pci_id_table) = {
6589 IPW2100_DEV_ID(0x2520), /* IN 2100A mPCI 3A */ 6589 IPW2100_DEV_ID(0x2520), /* IN 2100A mPCI 3A */
6590 IPW2100_DEV_ID(0x2521), /* IN 2100A mPCI 3B */ 6590 IPW2100_DEV_ID(0x2521), /* IN 2100A mPCI 3B */
6591 IPW2100_DEV_ID(0x2524), /* IN 2100A mPCI 3B */ 6591 IPW2100_DEV_ID(0x2524), /* IN 2100A mPCI 3B */
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 09ddd3e6bedc..63c2a7ade5fb 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11524,7 +11524,7 @@ out:
11524} 11524}
11525 11525
11526/* PCI driver stuff */ 11526/* PCI driver stuff */
11527static struct pci_device_id card_ids[] = { 11527static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
11528 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0}, 11528 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11529 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0}, 11529 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11530 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0}, 11530 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index b16b06c2031f..dc8ed1527666 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,14 +1,8 @@
1config IWLWIFI 1config IWLWIFI
2 tristate "Intel Wireless Wifi" 2 tristate "Intel Wireless Wifi"
3 depends on PCI && MAC80211 && EXPERIMENTAL 3 depends on PCI && MAC80211
4 select FW_LOADER 4 select FW_LOADER
5 5
6config IWLWIFI_SPECTRUM_MEASUREMENT
7 bool "Enable Spectrum Measurement in iwlagn driver"
8 depends on IWLWIFI
9 ---help---
10 This option will enable spectrum measurement for the iwlagn driver.
11
12config IWLWIFI_DEBUG 6config IWLWIFI_DEBUG
13 bool "Enable full debugging output in iwlagn and iwl3945 drivers" 7 bool "Enable full debugging output in iwlagn and iwl3945 drivers"
14 depends on IWLWIFI 8 depends on IWLWIFI
@@ -120,9 +114,3 @@ config IWL3945
120 inserted in and removed from the running kernel whenever you want), 114 inserted in and removed from the running kernel whenever you want),
121 say M here and read <file:Documentation/kbuild/modules.txt>. The 115 say M here and read <file:Documentation/kbuild/modules.txt>. The
122 module will be called iwl3945. 116 module will be called iwl3945.
123
124config IWL3945_SPECTRUM_MEASUREMENT
125 bool "Enable Spectrum Measurement in iwl3945 driver"
126 depends on IWL3945
127 ---help---
128 This option will enable spectrum measurement for the iwl3945 driver.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 7f82044af242..4e378faee650 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -3,7 +3,6 @@ iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
3iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o iwl-calib.o 3iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o iwl-calib.o
4iwlcore-objs += iwl-scan.o iwl-led.o 4iwlcore-objs += iwl-scan.o iwl-led.o
5iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o 5iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
6iwlcore-$(CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT) += iwl-spectrum.o
7iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o 6iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
8 7
9CFLAGS_iwl-devtrace.o := -I$(src) 8CFLAGS_iwl-devtrace.o := -I$(src)
@@ -20,3 +19,5 @@ iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
20# 3945 19# 3945
21obj-$(CONFIG_IWL3945) += iwl3945.o 20obj-$(CONFIG_IWL3945) += iwl3945.o
22iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o 21iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
22
23ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 8414178bcff4..3bf2e6e9b2d9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2008-2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -89,8 +89,78 @@ static void iwl1000_nic_config(struct iwl_priv *priv)
89 ~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK); 89 ~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
90} 90}
91 91
92static struct iwl_sensitivity_ranges iwl1000_sensitivity = {
93 .min_nrg_cck = 95,
94 .max_nrg_cck = 0, /* not used, set to 0 */
95 .auto_corr_min_ofdm = 90,
96 .auto_corr_min_ofdm_mrc = 170,
97 .auto_corr_min_ofdm_x1 = 120,
98 .auto_corr_min_ofdm_mrc_x1 = 240,
99
100 .auto_corr_max_ofdm = 120,
101 .auto_corr_max_ofdm_mrc = 210,
102 .auto_corr_max_ofdm_x1 = 155,
103 .auto_corr_max_ofdm_mrc_x1 = 290,
104
105 .auto_corr_min_cck = 125,
106 .auto_corr_max_cck = 200,
107 .auto_corr_min_cck_mrc = 170,
108 .auto_corr_max_cck_mrc = 400,
109 .nrg_th_cck = 95,
110 .nrg_th_ofdm = 95,
111
112 .barker_corr_th_min = 190,
113 .barker_corr_th_min_mrc = 390,
114 .nrg_th_cca = 62,
115};
116
117static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
118{
119 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
120 priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
121 priv->cfg->num_of_queues =
122 priv->cfg->mod_params->num_of_queues;
123
124 priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
125 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
126 priv->hw_params.scd_bc_tbls_size =
127 priv->cfg->num_of_queues *
128 sizeof(struct iwl5000_scd_bc_tbl);
129 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
130 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
131 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
132
133 priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
134 priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
135
136 priv->hw_params.max_bsm_size = 0;
137 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
138 BIT(IEEE80211_BAND_5GHZ);
139 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
140
141 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
142 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
143 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
144 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
145
146 if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
147 priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
148
149 /* Set initial sensitivity parameters */
150 /* Set initial calibration set */
151 priv->hw_params.sens = &iwl1000_sensitivity;
152 priv->hw_params.calib_init_cfg =
153 BIT(IWL_CALIB_XTAL) |
154 BIT(IWL_CALIB_LO) |
155 BIT(IWL_CALIB_TX_IQ) |
156 BIT(IWL_CALIB_TX_IQ_PERD) |
157 BIT(IWL_CALIB_BASE_BAND);
158
159 return 0;
160}
161
92static struct iwl_lib_ops iwl1000_lib = { 162static struct iwl_lib_ops iwl1000_lib = {
93 .set_hw_params = iwl5000_hw_set_hw_params, 163 .set_hw_params = iwl1000_hw_set_hw_params,
94 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, 164 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
95 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, 165 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
96 .txq_set_sched = iwl5000_txq_set_sched, 166 .txq_set_sched = iwl5000_txq_set_sched,
@@ -105,6 +175,8 @@ static struct iwl_lib_ops iwl1000_lib = {
105 .load_ucode = iwl5000_load_ucode, 175 .load_ucode = iwl5000_load_ucode,
106 .dump_nic_event_log = iwl_dump_nic_event_log, 176 .dump_nic_event_log = iwl_dump_nic_event_log,
107 .dump_nic_error_log = iwl_dump_nic_error_log, 177 .dump_nic_error_log = iwl_dump_nic_error_log,
178 .dump_csr = iwl_dump_csr,
179 .dump_fh = iwl_dump_fh,
108 .init_alive_start = iwl5000_init_alive_start, 180 .init_alive_start = iwl5000_init_alive_start,
109 .alive_notify = iwl5000_alive_notify, 181 .alive_notify = iwl5000_alive_notify,
110 .send_tx_power = iwl5000_send_tx_power, 182 .send_tx_power = iwl5000_send_tx_power,
@@ -138,9 +210,10 @@ static struct iwl_lib_ops iwl1000_lib = {
138 .temperature = iwl5000_temperature, 210 .temperature = iwl5000_temperature,
139 .set_ct_kill = iwl1000_set_ct_threshold, 211 .set_ct_kill = iwl1000_set_ct_threshold,
140 }, 212 },
213 .add_bcast_station = iwl_add_bcast_station,
141}; 214};
142 215
143static struct iwl_ops iwl1000_ops = { 216static const struct iwl_ops iwl1000_ops = {
144 .ucode = &iwl5000_ucode, 217 .ucode = &iwl5000_ucode,
145 .lib = &iwl1000_lib, 218 .lib = &iwl1000_lib,
146 .hcmd = &iwl5000_hcmd, 219 .hcmd = &iwl5000_hcmd,
@@ -173,7 +246,8 @@ struct iwl_cfg iwl1000_bgn_cfg = {
173 .use_rts_for_ht = true, /* use rts/cts protection */ 246 .use_rts_for_ht = true, /* use rts/cts protection */
174 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 247 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
175 .support_ct_kill_exit = true, 248 .support_ct_kill_exit = true,
176 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, 249 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
250 .chain_noise_scale = 1000,
177}; 251};
178 252
179struct iwl_cfg iwl1000_bg_cfg = { 253struct iwl_cfg iwl1000_bg_cfg = {
@@ -200,6 +274,8 @@ struct iwl_cfg iwl1000_bg_cfg = {
200 .led_compensation = 51, 274 .led_compensation = 51,
201 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 275 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
202 .support_ct_kill_exit = true, 276 .support_ct_kill_exit = true,
277 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
278 .chain_noise_scale = 1000,
203}; 279};
204 280
205MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX)); 281MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
index 08ce259a0e60..042f6bc0df13 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 6fd10d443ba3..3a876a8ece38 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
index a871d09d598f..abe2b739c4dc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
index 5a1033ca7aaa..ce990adc51e7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index d4b49883b30e..47909f94271e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 234891d8cc10..303cc8193adc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -45,8 +45,8 @@
45#include "iwl-sta.h" 45#include "iwl-sta.h"
46#include "iwl-3945.h" 46#include "iwl-3945.h"
47#include "iwl-eeprom.h" 47#include "iwl-eeprom.h"
48#include "iwl-helpers.h"
49#include "iwl-core.h" 48#include "iwl-core.h"
49#include "iwl-helpers.h"
50#include "iwl-led.h" 50#include "iwl-led.h"
51#include "iwl-3945-led.h" 51#include "iwl-3945-led.h"
52 52
@@ -1951,11 +1951,7 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
1951 } 1951 }
1952 1952
1953 /* Add the broadcast address so we can send broadcast frames */ 1953 /* Add the broadcast address so we can send broadcast frames */
1954 if (iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL) == 1954 priv->cfg->ops->lib->add_bcast_station(priv);
1955 IWL_INVALID_STATION) {
1956 IWL_ERR(priv, "Error adding BROADCAST address for transmit.\n");
1957 return -EIO;
1958 }
1959 1955
1960 /* If we have set the ASSOC_MSK and we are in BSS mode then 1956 /* If we have set the ASSOC_MSK and we are in BSS mode then
1961 * add the IWL_AP_ID to the station rate table */ 1957 * add the IWL_AP_ID to the station rate table */
@@ -2474,11 +2470,9 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
2474 memset((void *)&priv->hw_params, 0, 2470 memset((void *)&priv->hw_params, 0,
2475 sizeof(struct iwl_hw_params)); 2471 sizeof(struct iwl_hw_params));
2476 2472
2477 priv->shared_virt = 2473 priv->shared_virt = dma_alloc_coherent(&priv->pci_dev->dev,
2478 pci_alloc_consistent(priv->pci_dev, 2474 sizeof(struct iwl3945_shared),
2479 sizeof(struct iwl3945_shared), 2475 &priv->shared_phys, GFP_KERNEL);
2480 &priv->shared_phys);
2481
2482 if (!priv->shared_virt) { 2476 if (!priv->shared_virt) {
2483 IWL_ERR(priv, "failed to allocate pci memory\n"); 2477 IWL_ERR(priv, "failed to allocate pci memory\n");
2484 mutex_unlock(&priv->mutex); 2478 mutex_unlock(&priv->mutex);
@@ -2796,6 +2790,7 @@ static struct iwl_lib_ops iwl3945_lib = {
2796 .post_associate = iwl3945_post_associate, 2790 .post_associate = iwl3945_post_associate,
2797 .isr = iwl_isr_legacy, 2791 .isr = iwl_isr_legacy,
2798 .config_ap = iwl3945_config_ap, 2792 .config_ap = iwl3945_config_ap,
2793 .add_bcast_station = iwl3945_add_bcast_station,
2799}; 2794};
2800 2795
2801static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = { 2796static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
@@ -2804,7 +2799,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
2804 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag, 2799 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
2805}; 2800};
2806 2801
2807static struct iwl_ops iwl3945_ops = { 2802static const struct iwl_ops iwl3945_ops = {
2808 .ucode = &iwl3945_ucode, 2803 .ucode = &iwl3945_ucode,
2809 .lib = &iwl3945_lib, 2804 .lib = &iwl3945_lib,
2810 .hcmd = &iwl3945_hcmd, 2805 .hcmd = &iwl3945_hcmd,
@@ -2830,6 +2825,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
2830 .ht_greenfield_support = false, 2825 .ht_greenfield_support = false,
2831 .led_compensation = 64, 2826 .led_compensation = 64,
2832 .broken_powersave = true, 2827 .broken_powersave = true,
2828 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
2833}; 2829};
2834 2830
2835static struct iwl_cfg iwl3945_abg_cfg = { 2831static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2847,9 +2843,10 @@ static struct iwl_cfg iwl3945_abg_cfg = {
2847 .ht_greenfield_support = false, 2843 .ht_greenfield_support = false,
2848 .led_compensation = 64, 2844 .led_compensation = 64,
2849 .broken_powersave = true, 2845 .broken_powersave = true,
2846 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
2850}; 2847};
2851 2848
2852struct pci_device_id iwl3945_hw_card_ids[] = { 2849DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
2853 {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)}, 2850 {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
2854 {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)}, 2851 {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
2855 {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)}, 2852 {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 531fa125f5a6..452dfd5456c6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -37,7 +37,7 @@
37#include <net/ieee80211_radiotap.h> 37#include <net/ieee80211_radiotap.h>
38 38
39/* Hardware specific file defines the PCI IDs table for that hardware module */ 39/* Hardware specific file defines the PCI IDs table for that hardware module */
40extern struct pci_device_id iwl3945_hw_card_ids[]; 40extern const struct pci_device_id iwl3945_hw_card_ids[];
41 41
42#include "iwl-csr.h" 42#include "iwl-csr.h"
43#include "iwl-prph.h" 43#include "iwl-prph.h"
@@ -171,24 +171,6 @@ struct iwl3945_frame {
171 171
172#define SCAN_INTERVAL 100 172#define SCAN_INTERVAL 100
173 173
174#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
175#define STATUS_HCMD_SYNC_ACTIVE 1 /* sync host command in progress */
176#define STATUS_INT_ENABLED 2
177#define STATUS_RF_KILL_HW 3
178#define STATUS_INIT 5
179#define STATUS_ALIVE 6
180#define STATUS_READY 7
181#define STATUS_TEMPERATURE 8
182#define STATUS_GEO_CONFIGURED 9
183#define STATUS_EXIT_PENDING 10
184#define STATUS_STATISTICS 12
185#define STATUS_SCANNING 13
186#define STATUS_SCAN_ABORTING 14
187#define STATUS_SCAN_HW 15
188#define STATUS_POWER_PMI 16
189#define STATUS_FW_ERROR 17
190#define STATUS_CONF_PENDING 18
191
192#define MAX_TID_COUNT 9 174#define MAX_TID_COUNT 9
193 175
194#define IWL_INVALID_RATE 0xFF 176#define IWL_INVALID_RATE 0xFF
@@ -226,7 +208,8 @@ extern void iwl3945_rx_replenish(void *data);
226extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 208extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
227extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, 209extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
228 struct ieee80211_hdr *hdr,int left); 210 struct ieee80211_hdr *hdr,int left);
229extern void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log); 211extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
212 char **buf, bool display);
230extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv); 213extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
231 214
232/* 215/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index c606366b582c..67ef562e8db1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 31462813bac0..1bd2cd836026 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -581,6 +581,13 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
581 581
582 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); 582 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
583 583
584 /* make sure all queue are not stopped */
585 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
586 for (i = 0; i < 4; i++)
587 atomic_set(&priv->queue_stop_count[i], 0);
588
589 /* reset to 0 to enable all the queue first */
590 priv->txq_ctx_active_msk = 0;
584 /* Map each Tx/cmd queue to its corresponding fifo */ 591 /* Map each Tx/cmd queue to its corresponding fifo */
585 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { 592 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
586 int ac = default_queue_to_tx_fifo[i]; 593 int ac = default_queue_to_tx_fifo[i];
@@ -2206,9 +2213,10 @@ static struct iwl_lib_ops iwl4965_lib = {
2206 .temperature = iwl4965_temperature_calib, 2213 .temperature = iwl4965_temperature_calib,
2207 .set_ct_kill = iwl4965_set_ct_threshold, 2214 .set_ct_kill = iwl4965_set_ct_threshold,
2208 }, 2215 },
2216 .add_bcast_station = iwl_add_bcast_station,
2209}; 2217};
2210 2218
2211static struct iwl_ops iwl4965_ops = { 2219static const struct iwl_ops iwl4965_ops = {
2212 .ucode = &iwl4965_ucode, 2220 .ucode = &iwl4965_ucode,
2213 .lib = &iwl4965_lib, 2221 .lib = &iwl4965_lib,
2214 .hcmd = &iwl4965_hcmd, 2222 .hcmd = &iwl4965_hcmd,
@@ -2239,7 +2247,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
2239 .broken_powersave = true, 2247 .broken_powersave = true,
2240 .led_compensation = 61, 2248 .led_compensation = 61,
2241 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, 2249 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
2242 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, 2250 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
2243}; 2251};
2244 2252
2245/* Module firmware */ 2253/* Module firmware */
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index bc056e9ab85f..714e032f6217 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved. 8 * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index cffaae772d51..e476acb53aa7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -179,14 +179,24 @@ static void iwl5000_gain_computation(struct iwl_priv *priv,
179 data->delta_gain_code[i] = 0; 179 data->delta_gain_code[i] = 0;
180 continue; 180 continue;
181 } 181 }
182 delta_g = (1000 * ((s32)average_noise[default_chain] - 182
183 delta_g = (priv->cfg->chain_noise_scale *
184 ((s32)average_noise[default_chain] -
183 (s32)average_noise[i])) / 1500; 185 (s32)average_noise[i])) / 1500;
186
184 /* bound gain by 2 bits value max, 3rd bit is sign */ 187 /* bound gain by 2 bits value max, 3rd bit is sign */
185 data->delta_gain_code[i] = 188 data->delta_gain_code[i] =
186 min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); 189 min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
187 190
188 if (delta_g < 0) 191 if (delta_g < 0)
189 /* set negative sign */ 192 /*
193 * set negative sign ...
194 * note to Intel developers: This is uCode API format,
195 * not the format of any internal device registers.
196 * Do not change this format for e.g. 6050 or similar
197 * devices. Change format only if more resolution
198 * (i.e. more than 2 bits magnitude) is needed.
199 */
190 data->delta_gain_code[i] |= (1 << 2); 200 data->delta_gain_code[i] |= (1 << 2);
191 } 201 }
192 202
@@ -263,8 +273,8 @@ static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
263 273
264 .auto_corr_max_ofdm = 120, 274 .auto_corr_max_ofdm = 120,
265 .auto_corr_max_ofdm_mrc = 210, 275 .auto_corr_max_ofdm_mrc = 210,
266 .auto_corr_max_ofdm_x1 = 155, 276 .auto_corr_max_ofdm_x1 = 120,
267 .auto_corr_max_ofdm_mrc_x1 = 290, 277 .auto_corr_max_ofdm_mrc_x1 = 240,
268 278
269 .auto_corr_min_cck = 125, 279 .auto_corr_min_cck = 125,
270 .auto_corr_max_cck = 200, 280 .auto_corr_max_cck = 200,
@@ -412,12 +422,14 @@ static void iwl5000_rx_calib_complete(struct iwl_priv *priv,
412/* 422/*
413 * ucode 423 * ucode
414 */ 424 */
415static int iwl5000_load_section(struct iwl_priv *priv, 425static int iwl5000_load_section(struct iwl_priv *priv, const char *name,
416 struct fw_desc *image, 426 struct fw_desc *image, u32 dst_addr)
417 u32 dst_addr)
418{ 427{
419 dma_addr_t phy_addr = image->p_addr; 428 dma_addr_t phy_addr = image->p_addr;
420 u32 byte_cnt = image->len; 429 u32 byte_cnt = image->len;
430 int ret;
431
432 priv->ucode_write_complete = 0;
421 433
422 iwl_write_direct32(priv, 434 iwl_write_direct32(priv,
423 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), 435 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
@@ -447,57 +459,36 @@ static int iwl5000_load_section(struct iwl_priv *priv,
447 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | 459 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
448 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); 460 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
449 461
450 return 0; 462 IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
451}
452
453static int iwl5000_load_given_ucode(struct iwl_priv *priv,
454 struct fw_desc *inst_image,
455 struct fw_desc *data_image)
456{
457 int ret = 0;
458
459 ret = iwl5000_load_section(priv, inst_image,
460 IWL50_RTC_INST_LOWER_BOUND);
461 if (ret)
462 return ret;
463
464 IWL_DEBUG_INFO(priv, "INST uCode section being loaded...\n");
465 ret = wait_event_interruptible_timeout(priv->wait_command_queue, 463 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
466 priv->ucode_write_complete, 5 * HZ); 464 priv->ucode_write_complete, 5 * HZ);
467 if (ret == -ERESTARTSYS) { 465 if (ret == -ERESTARTSYS) {
468 IWL_ERR(priv, "Could not load the INST uCode section due " 466 IWL_ERR(priv, "Could not load the %s uCode section due "
469 "to interrupt\n"); 467 "to interrupt\n", name);
470 return ret; 468 return ret;
471 } 469 }
472 if (!ret) { 470 if (!ret) {
473 IWL_ERR(priv, "Could not load the INST uCode section\n"); 471 IWL_ERR(priv, "Could not load the %s uCode section\n",
472 name);
474 return -ETIMEDOUT; 473 return -ETIMEDOUT;
475 } 474 }
476 475
477 priv->ucode_write_complete = 0; 476 return 0;
478 477}
479 ret = iwl5000_load_section(
480 priv, data_image, IWL50_RTC_DATA_LOWER_BOUND);
481 if (ret)
482 return ret;
483 478
484 IWL_DEBUG_INFO(priv, "DATA uCode section being loaded...\n"); 479static int iwl5000_load_given_ucode(struct iwl_priv *priv,
480 struct fw_desc *inst_image,
481 struct fw_desc *data_image)
482{
483 int ret = 0;
485 484
486 ret = wait_event_interruptible_timeout(priv->wait_command_queue, 485 ret = iwl5000_load_section(priv, "INST", inst_image,
487 priv->ucode_write_complete, 5 * HZ); 486 IWL50_RTC_INST_LOWER_BOUND);
488 if (ret == -ERESTARTSYS) { 487 if (ret)
489 IWL_ERR(priv, "Could not load the INST uCode section due "
490 "to interrupt\n");
491 return ret; 488 return ret;
492 } else if (!ret) {
493 IWL_ERR(priv, "Could not load the DATA uCode section\n");
494 return -ETIMEDOUT;
495 } else
496 ret = 0;
497 489
498 priv->ucode_write_complete = 0; 490 return iwl5000_load_section(priv, "DATA", data_image,
499 491 IWL50_RTC_DATA_LOWER_BOUND);
500 return ret;
501} 492}
502 493
503int iwl5000_load_ucode(struct iwl_priv *priv) 494int iwl5000_load_ucode(struct iwl_priv *priv)
@@ -657,6 +648,13 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
657 648
658 iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); 649 iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
659 650
651 /* make sure all queue are not stopped */
652 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
653 for (i = 0; i < 4; i++)
654 atomic_set(&priv->queue_stop_count[i], 0);
655
656 /* reset to 0 to enable all the queue first */
657 priv->txq_ctx_active_msk = 0;
660 /* map qos queues to fifos one-to-one */ 658 /* map qos queues to fifos one-to-one */
661 for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) { 659 for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
662 int ac = iwl5000_default_queue_to_tx_fifo[i]; 660 int ac = iwl5000_default_queue_to_tx_fifo[i];
@@ -781,7 +779,7 @@ void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
781 779
782 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; 780 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
783 781
784 if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP) 782 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
785 scd_bc_tbl[txq_id]. 783 scd_bc_tbl[txq_id].
786 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; 784 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
787} 785}
@@ -800,12 +798,12 @@ void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
800 if (txq_id != IWL_CMD_QUEUE_NUM) 798 if (txq_id != IWL_CMD_QUEUE_NUM)
801 sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id; 799 sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
802 800
803 bc_ent = cpu_to_le16(1 | (sta_id << 12)); 801 bc_ent = cpu_to_le16(1 | (sta_id << 12));
804 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; 802 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
805 803
806 if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP) 804 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
807 scd_bc_tbl[txq_id]. 805 scd_bc_tbl[txq_id].
808 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; 806 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
809} 807}
810 808
811static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid, 809static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
@@ -1464,6 +1462,8 @@ struct iwl_lib_ops iwl5000_lib = {
1464 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 1462 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
1465 .dump_nic_event_log = iwl_dump_nic_event_log, 1463 .dump_nic_event_log = iwl_dump_nic_event_log,
1466 .dump_nic_error_log = iwl_dump_nic_error_log, 1464 .dump_nic_error_log = iwl_dump_nic_error_log,
1465 .dump_csr = iwl_dump_csr,
1466 .dump_fh = iwl_dump_fh,
1467 .load_ucode = iwl5000_load_ucode, 1467 .load_ucode = iwl5000_load_ucode,
1468 .init_alive_start = iwl5000_init_alive_start, 1468 .init_alive_start = iwl5000_init_alive_start,
1469 .alive_notify = iwl5000_alive_notify, 1469 .alive_notify = iwl5000_alive_notify,
@@ -1499,6 +1499,7 @@ struct iwl_lib_ops iwl5000_lib = {
1499 .temperature = iwl5000_temperature, 1499 .temperature = iwl5000_temperature,
1500 .set_ct_kill = iwl5000_set_ct_threshold, 1500 .set_ct_kill = iwl5000_set_ct_threshold,
1501 }, 1501 },
1502 .add_bcast_station = iwl_add_bcast_station,
1502}; 1503};
1503 1504
1504static struct iwl_lib_ops iwl5150_lib = { 1505static struct iwl_lib_ops iwl5150_lib = {
@@ -1516,6 +1517,7 @@ static struct iwl_lib_ops iwl5150_lib = {
1516 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 1517 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
1517 .dump_nic_event_log = iwl_dump_nic_event_log, 1518 .dump_nic_event_log = iwl_dump_nic_event_log,
1518 .dump_nic_error_log = iwl_dump_nic_error_log, 1519 .dump_nic_error_log = iwl_dump_nic_error_log,
1520 .dump_csr = iwl_dump_csr,
1519 .load_ucode = iwl5000_load_ucode, 1521 .load_ucode = iwl5000_load_ucode,
1520 .init_alive_start = iwl5000_init_alive_start, 1522 .init_alive_start = iwl5000_init_alive_start,
1521 .alive_notify = iwl5000_alive_notify, 1523 .alive_notify = iwl5000_alive_notify,
@@ -1551,9 +1553,10 @@ static struct iwl_lib_ops iwl5150_lib = {
1551 .temperature = iwl5150_temperature, 1553 .temperature = iwl5150_temperature,
1552 .set_ct_kill = iwl5150_set_ct_threshold, 1554 .set_ct_kill = iwl5150_set_ct_threshold,
1553 }, 1555 },
1556 .add_bcast_station = iwl_add_bcast_station,
1554}; 1557};
1555 1558
1556static struct iwl_ops iwl5000_ops = { 1559static const struct iwl_ops iwl5000_ops = {
1557 .ucode = &iwl5000_ucode, 1560 .ucode = &iwl5000_ucode,
1558 .lib = &iwl5000_lib, 1561 .lib = &iwl5000_lib,
1559 .hcmd = &iwl5000_hcmd, 1562 .hcmd = &iwl5000_hcmd,
@@ -1561,7 +1564,7 @@ static struct iwl_ops iwl5000_ops = {
1561 .led = &iwlagn_led_ops, 1564 .led = &iwlagn_led_ops,
1562}; 1565};
1563 1566
1564static struct iwl_ops iwl5150_ops = { 1567static const struct iwl_ops iwl5150_ops = {
1565 .ucode = &iwl5000_ucode, 1568 .ucode = &iwl5000_ucode,
1566 .lib = &iwl5150_lib, 1569 .lib = &iwl5150_lib,
1567 .hcmd = &iwl5000_hcmd, 1570 .hcmd = &iwl5000_hcmd,
@@ -1598,7 +1601,8 @@ struct iwl_cfg iwl5300_agn_cfg = {
1598 .led_compensation = 51, 1601 .led_compensation = 51,
1599 .use_rts_for_ht = true, /* use rts/cts protection */ 1602 .use_rts_for_ht = true, /* use rts/cts protection */
1600 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1603 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1601 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, 1604 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
1605 .chain_noise_scale = 1000,
1602}; 1606};
1603 1607
1604struct iwl_cfg iwl5100_bgn_cfg = { 1608struct iwl_cfg iwl5100_bgn_cfg = {
@@ -1623,6 +1627,8 @@ struct iwl_cfg iwl5100_bgn_cfg = {
1623 .led_compensation = 51, 1627 .led_compensation = 51,
1624 .use_rts_for_ht = true, /* use rts/cts protection */ 1628 .use_rts_for_ht = true, /* use rts/cts protection */
1625 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1629 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1630 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
1631 .chain_noise_scale = 1000,
1626}; 1632};
1627 1633
1628struct iwl_cfg iwl5100_abg_cfg = { 1634struct iwl_cfg iwl5100_abg_cfg = {
@@ -1645,6 +1651,8 @@ struct iwl_cfg iwl5100_abg_cfg = {
1645 .use_bsm = false, 1651 .use_bsm = false,
1646 .led_compensation = 51, 1652 .led_compensation = 51,
1647 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1653 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1654 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
1655 .chain_noise_scale = 1000,
1648}; 1656};
1649 1657
1650struct iwl_cfg iwl5100_agn_cfg = { 1658struct iwl_cfg iwl5100_agn_cfg = {
@@ -1669,7 +1677,8 @@ struct iwl_cfg iwl5100_agn_cfg = {
1669 .led_compensation = 51, 1677 .led_compensation = 51,
1670 .use_rts_for_ht = true, /* use rts/cts protection */ 1678 .use_rts_for_ht = true, /* use rts/cts protection */
1671 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1679 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1672 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, 1680 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
1681 .chain_noise_scale = 1000,
1673}; 1682};
1674 1683
1675struct iwl_cfg iwl5350_agn_cfg = { 1684struct iwl_cfg iwl5350_agn_cfg = {
@@ -1694,7 +1703,8 @@ struct iwl_cfg iwl5350_agn_cfg = {
1694 .led_compensation = 51, 1703 .led_compensation = 51,
1695 .use_rts_for_ht = true, /* use rts/cts protection */ 1704 .use_rts_for_ht = true, /* use rts/cts protection */
1696 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1705 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1697 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, 1706 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
1707 .chain_noise_scale = 1000,
1698}; 1708};
1699 1709
1700struct iwl_cfg iwl5150_agn_cfg = { 1710struct iwl_cfg iwl5150_agn_cfg = {
@@ -1719,7 +1729,8 @@ struct iwl_cfg iwl5150_agn_cfg = {
1719 .led_compensation = 51, 1729 .led_compensation = 51,
1720 .use_rts_for_ht = true, /* use rts/cts protection */ 1730 .use_rts_for_ht = true, /* use rts/cts protection */
1721 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1731 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1722 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, 1732 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
1733 .chain_noise_scale = 1000,
1723}; 1734};
1724 1735
1725struct iwl_cfg iwl5150_abg_cfg = { 1736struct iwl_cfg iwl5150_abg_cfg = {
@@ -1742,6 +1753,8 @@ struct iwl_cfg iwl5150_abg_cfg = {
1742 .use_bsm = false, 1753 .use_bsm = false,
1743 .led_compensation = 51, 1754 .led_compensation = 51,
1744 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1755 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1756 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
1757 .chain_noise_scale = 1000,
1745}; 1758};
1746 1759
1747MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); 1760MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
index 90185777d98b..ddba39999997 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved. 8 * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 74e571049273..c4844adff92a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2008-2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -70,6 +70,14 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
70 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD; 70 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
71} 71}
72 72
73/* Indicate calibration version to uCode. */
74static void iwl6050_set_calib_version(struct iwl_priv *priv)
75{
76 if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6)
77 iwl_set_bit(priv, CSR_GP_DRIVER_REG,
78 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
79}
80
73/* NIC configuration for 6000 series */ 81/* NIC configuration for 6000 series */
74static void iwl6000_nic_config(struct iwl_priv *priv) 82static void iwl6000_nic_config(struct iwl_priv *priv)
75{ 83{
@@ -96,6 +104,8 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
96 CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA); 104 CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
97 } 105 }
98 /* else do nothing, uCode configured */ 106 /* else do nothing, uCode configured */
107 if (priv->cfg->ops->lib->temp_ops.set_calib_version)
108 priv->cfg->ops->lib->temp_ops.set_calib_version(priv);
99} 109}
100 110
101static struct iwl_sensitivity_ranges iwl6000_sensitivity = { 111static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
@@ -108,7 +118,7 @@ static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
108 118
109 .auto_corr_max_ofdm = 145, 119 .auto_corr_max_ofdm = 145,
110 .auto_corr_max_ofdm_mrc = 232, 120 .auto_corr_max_ofdm_mrc = 232,
111 .auto_corr_max_ofdm_x1 = 145, 121 .auto_corr_max_ofdm_x1 = 110,
112 .auto_corr_max_ofdm_mrc_x1 = 232, 122 .auto_corr_max_ofdm_mrc_x1 = 232,
113 123
114 .auto_corr_min_cck = 125, 124 .auto_corr_min_cck = 125,
@@ -158,11 +168,25 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
158 /* Set initial sensitivity parameters */ 168 /* Set initial sensitivity parameters */
159 /* Set initial calibration set */ 169 /* Set initial calibration set */
160 priv->hw_params.sens = &iwl6000_sensitivity; 170 priv->hw_params.sens = &iwl6000_sensitivity;
161 priv->hw_params.calib_init_cfg = 171 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
172 case CSR_HW_REV_TYPE_6x50:
173 priv->hw_params.calib_init_cfg =
174 BIT(IWL_CALIB_XTAL) |
175 BIT(IWL_CALIB_DC) |
176 BIT(IWL_CALIB_LO) |
177 BIT(IWL_CALIB_TX_IQ) |
178 BIT(IWL_CALIB_BASE_BAND);
179
180 break;
181 default:
182 priv->hw_params.calib_init_cfg =
162 BIT(IWL_CALIB_XTAL) | 183 BIT(IWL_CALIB_XTAL) |
163 BIT(IWL_CALIB_LO) | 184 BIT(IWL_CALIB_LO) |
164 BIT(IWL_CALIB_TX_IQ) | 185 BIT(IWL_CALIB_TX_IQ) |
165 BIT(IWL_CALIB_BASE_BAND); 186 BIT(IWL_CALIB_BASE_BAND);
187 break;
188 }
189
166 return 0; 190 return 0;
167} 191}
168 192
@@ -215,6 +239,8 @@ static struct iwl_lib_ops iwl6000_lib = {
215 .load_ucode = iwl5000_load_ucode, 239 .load_ucode = iwl5000_load_ucode,
216 .dump_nic_event_log = iwl_dump_nic_event_log, 240 .dump_nic_event_log = iwl_dump_nic_event_log,
217 .dump_nic_error_log = iwl_dump_nic_error_log, 241 .dump_nic_error_log = iwl_dump_nic_error_log,
242 .dump_csr = iwl_dump_csr,
243 .dump_fh = iwl_dump_fh,
218 .init_alive_start = iwl5000_init_alive_start, 244 .init_alive_start = iwl5000_init_alive_start,
219 .alive_notify = iwl5000_alive_notify, 245 .alive_notify = iwl5000_alive_notify,
220 .send_tx_power = iwl5000_send_tx_power, 246 .send_tx_power = iwl5000_send_tx_power,
@@ -250,9 +276,10 @@ static struct iwl_lib_ops iwl6000_lib = {
250 .temperature = iwl5000_temperature, 276 .temperature = iwl5000_temperature,
251 .set_ct_kill = iwl6000_set_ct_threshold, 277 .set_ct_kill = iwl6000_set_ct_threshold,
252 }, 278 },
279 .add_bcast_station = iwl_add_bcast_station,
253}; 280};
254 281
255static struct iwl_ops iwl6000_ops = { 282static const struct iwl_ops iwl6000_ops = {
256 .ucode = &iwl5000_ucode, 283 .ucode = &iwl5000_ucode,
257 .lib = &iwl6000_lib, 284 .lib = &iwl6000_lib,
258 .hcmd = &iwl5000_hcmd, 285 .hcmd = &iwl5000_hcmd,
@@ -260,18 +287,68 @@ static struct iwl_ops iwl6000_ops = {
260 .led = &iwlagn_led_ops, 287 .led = &iwlagn_led_ops,
261}; 288};
262 289
263static struct iwl_hcmd_utils_ops iwl6050_hcmd_utils = { 290static struct iwl_lib_ops iwl6050_lib = {
264 .get_hcmd_size = iwl5000_get_hcmd_size, 291 .set_hw_params = iwl6000_hw_set_hw_params,
265 .build_addsta_hcmd = iwl5000_build_addsta_hcmd, 292 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
266 .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag, 293 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
267 .calc_rssi = iwl5000_calc_rssi, 294 .txq_set_sched = iwl5000_txq_set_sched,
295 .txq_agg_enable = iwl5000_txq_agg_enable,
296 .txq_agg_disable = iwl5000_txq_agg_disable,
297 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
298 .txq_free_tfd = iwl_hw_txq_free_tfd,
299 .txq_init = iwl_hw_tx_queue_init,
300 .rx_handler_setup = iwl5000_rx_handler_setup,
301 .setup_deferred_work = iwl5000_setup_deferred_work,
302 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
303 .load_ucode = iwl5000_load_ucode,
304 .dump_nic_event_log = iwl_dump_nic_event_log,
305 .dump_nic_error_log = iwl_dump_nic_error_log,
306 .dump_csr = iwl_dump_csr,
307 .dump_fh = iwl_dump_fh,
308 .init_alive_start = iwl5000_init_alive_start,
309 .alive_notify = iwl5000_alive_notify,
310 .send_tx_power = iwl5000_send_tx_power,
311 .update_chain_flags = iwl_update_chain_flags,
312 .set_channel_switch = iwl6000_hw_channel_switch,
313 .apm_ops = {
314 .init = iwl_apm_init,
315 .stop = iwl_apm_stop,
316 .config = iwl6000_nic_config,
317 .set_pwr_src = iwl_set_pwr_src,
318 },
319 .eeprom_ops = {
320 .regulatory_bands = {
321 EEPROM_5000_REG_BAND_1_CHANNELS,
322 EEPROM_5000_REG_BAND_2_CHANNELS,
323 EEPROM_5000_REG_BAND_3_CHANNELS,
324 EEPROM_5000_REG_BAND_4_CHANNELS,
325 EEPROM_5000_REG_BAND_5_CHANNELS,
326 EEPROM_5000_REG_BAND_24_HT40_CHANNELS,
327 EEPROM_5000_REG_BAND_52_HT40_CHANNELS
328 },
329 .verify_signature = iwlcore_eeprom_verify_signature,
330 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
331 .release_semaphore = iwlcore_eeprom_release_semaphore,
332 .calib_version = iwl5000_eeprom_calib_version,
333 .query_addr = iwl5000_eeprom_query_addr,
334 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
335 },
336 .post_associate = iwl_post_associate,
337 .isr = iwl_isr_ict,
338 .config_ap = iwl_config_ap,
339 .temp_ops = {
340 .temperature = iwl5000_temperature,
341 .set_ct_kill = iwl6000_set_ct_threshold,
342 .set_calib_version = iwl6050_set_calib_version,
343 },
344 .add_bcast_station = iwl_add_bcast_station,
268}; 345};
269 346
270static struct iwl_ops iwl6050_ops = { 347static const struct iwl_ops iwl6050_ops = {
271 .ucode = &iwl5000_ucode, 348 .ucode = &iwl5000_ucode,
272 .lib = &iwl6000_lib, 349 .lib = &iwl6050_lib,
273 .hcmd = &iwl5000_hcmd, 350 .hcmd = &iwl5000_hcmd,
274 .utils = &iwl6050_hcmd_utils, 351 .utils = &iwl5000_hcmd_utils,
275 .led = &iwlagn_led_ops, 352 .led = &iwlagn_led_ops,
276}; 353};
277 354
@@ -306,7 +383,8 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
306 .supports_idle = true, 383 .supports_idle = true,
307 .adv_thermal_throttle = true, 384 .adv_thermal_throttle = true,
308 .support_ct_kill_exit = true, 385 .support_ct_kill_exit = true,
309 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, 386 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
387 .chain_noise_scale = 1000,
310}; 388};
311 389
312struct iwl_cfg iwl6000i_2abg_cfg = { 390struct iwl_cfg iwl6000i_2abg_cfg = {
@@ -336,6 +414,8 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
336 .supports_idle = true, 414 .supports_idle = true,
337 .adv_thermal_throttle = true, 415 .adv_thermal_throttle = true,
338 .support_ct_kill_exit = true, 416 .support_ct_kill_exit = true,
417 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
418 .chain_noise_scale = 1000,
339}; 419};
340 420
341struct iwl_cfg iwl6000i_2bg_cfg = { 421struct iwl_cfg iwl6000i_2bg_cfg = {
@@ -365,6 +445,8 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
365 .supports_idle = true, 445 .supports_idle = true,
366 .adv_thermal_throttle = true, 446 .adv_thermal_throttle = true,
367 .support_ct_kill_exit = true, 447 .support_ct_kill_exit = true,
448 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
449 .chain_noise_scale = 1000,
368}; 450};
369 451
370struct iwl_cfg iwl6050_2agn_cfg = { 452struct iwl_cfg iwl6050_2agn_cfg = {
@@ -395,7 +477,8 @@ struct iwl_cfg iwl6050_2agn_cfg = {
395 .supports_idle = true, 477 .supports_idle = true,
396 .adv_thermal_throttle = true, 478 .adv_thermal_throttle = true,
397 .support_ct_kill_exit = true, 479 .support_ct_kill_exit = true,
398 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DYNAMIC, 480 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
481 .chain_noise_scale = 1500,
399}; 482};
400 483
401struct iwl_cfg iwl6050_2abg_cfg = { 484struct iwl_cfg iwl6050_2abg_cfg = {
@@ -425,6 +508,8 @@ struct iwl_cfg iwl6050_2abg_cfg = {
425 .supports_idle = true, 508 .supports_idle = true,
426 .adv_thermal_throttle = true, 509 .adv_thermal_throttle = true,
427 .support_ct_kill_exit = true, 510 .support_ct_kill_exit = true,
511 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
512 .chain_noise_scale = 1500,
428}; 513};
429 514
430struct iwl_cfg iwl6000_3agn_cfg = { 515struct iwl_cfg iwl6000_3agn_cfg = {
@@ -455,7 +540,8 @@ struct iwl_cfg iwl6000_3agn_cfg = {
455 .supports_idle = true, 540 .supports_idle = true,
456 .adv_thermal_throttle = true, 541 .adv_thermal_throttle = true,
457 .support_ct_kill_exit = true, 542 .support_ct_kill_exit = true,
458 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, 543 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
544 .chain_noise_scale = 1000,
459}; 545};
460 546
461MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); 547MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.c b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
index 3bccba20f6da..1a24946bc203 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.h b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
index ab55f92a161d..a594e4fdc6b8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index b93e49158196..8bf7c20b9d39 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -298,10 +298,23 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
298 struct iwl_lq_sta *lq_data, u8 tid, 298 struct iwl_lq_sta *lq_data, u8 tid,
299 struct ieee80211_sta *sta) 299 struct ieee80211_sta *sta)
300{ 300{
301 int ret;
302
301 if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) { 303 if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
302 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", 304 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
303 sta->addr, tid); 305 sta->addr, tid);
304 ieee80211_start_tx_ba_session(sta, tid); 306 ret = ieee80211_start_tx_ba_session(sta, tid);
307 if (ret == -EAGAIN) {
308 /*
309 * driver and mac80211 is out of sync
310 * this might be cause by reloading firmware
311 * stop the tx ba session here
312 */
313 IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n",
314 tid);
315 ret = ieee80211_stop_tx_ba_session(sta, tid,
316 WLAN_BACK_INITIATOR);
317 }
305 } 318 }
306} 319}
307 320
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index affc0c5a2f2c..e71923961e69 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -191,7 +191,7 @@ enum {
191 IWL_RATE_2M_MASK) 191 IWL_RATE_2M_MASK)
192 192
193#define IWL_CCK_RATES_MASK \ 193#define IWL_CCK_RATES_MASK \
194 (IWL_BASIC_RATES_MASK | \ 194 (IWL_CCK_BASIC_RATES_MASK | \
195 IWL_RATE_5M_MASK | \ 195 IWL_RATE_5M_MASK | \
196 IWL_RATE_11M_MASK) 196 IWL_RATE_11M_MASK)
197 197
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 1c9866daf815..6aeb82b6992f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -73,13 +73,7 @@
73#define VD 73#define VD
74#endif 74#endif
75 75
76#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT 76#define DRV_VERSION IWLWIFI_VERSION VD
77#define VS "s"
78#else
79#define VS
80#endif
81
82#define DRV_VERSION IWLWIFI_VERSION VD VS
83 77
84 78
85MODULE_DESCRIPTION(DRV_DESCRIPTION); 79MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -203,7 +197,8 @@ int iwl_commit_rxon(struct iwl_priv *priv)
203 priv->start_calib = 0; 197 priv->start_calib = 0;
204 198
205 /* Add the broadcast address so we can send broadcast frames */ 199 /* Add the broadcast address so we can send broadcast frames */
206 iwl_add_bcast_station(priv); 200 priv->cfg->ops->lib->add_bcast_station(priv);
201
207 202
208 /* If we have set the ASSOC_MSK and we are in BSS mode then 203 /* If we have set the ASSOC_MSK and we are in BSS mode then
209 * add the IWL_AP_ID to the station rate table */ 204 * add the IWL_AP_ID to the station rate table */
@@ -657,6 +652,131 @@ static void iwl_bg_statistics_periodic(unsigned long data)
657 iwl_send_statistics_request(priv, CMD_ASYNC, false); 652 iwl_send_statistics_request(priv, CMD_ASYNC, false);
658} 653}
659 654
655
656static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
657 u32 start_idx, u32 num_events,
658 u32 mode)
659{
660 u32 i;
661 u32 ptr; /* SRAM byte address of log data */
662 u32 ev, time, data; /* event log data */
663 unsigned long reg_flags;
664
665 if (mode == 0)
666 ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
667 else
668 ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
669
670 /* Make sure device is powered up for SRAM reads */
671 spin_lock_irqsave(&priv->reg_lock, reg_flags);
672 if (iwl_grab_nic_access(priv)) {
673 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
674 return;
675 }
676
677 /* Set starting address; reads will auto-increment */
678 _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
679 rmb();
680
681 /*
682 * "time" is actually "data" for mode 0 (no timestamp).
683 * place event id # at far right for easier visual parsing.
684 */
685 for (i = 0; i < num_events; i++) {
686 ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
687 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
688 if (mode == 0) {
689 trace_iwlwifi_dev_ucode_cont_event(priv,
690 0, time, ev);
691 } else {
692 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
693 trace_iwlwifi_dev_ucode_cont_event(priv,
694 time, data, ev);
695 }
696 }
697 /* Allow device to power down */
698 iwl_release_nic_access(priv);
699 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
700}
701
702static void iwl_continuous_event_trace(struct iwl_priv *priv)
703{
704 u32 capacity; /* event log capacity in # entries */
705 u32 base; /* SRAM byte address of event log header */
706 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
707 u32 num_wraps; /* # times uCode wrapped to top of log */
708 u32 next_entry; /* index of next entry to be written by uCode */
709
710 if (priv->ucode_type == UCODE_INIT)
711 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
712 else
713 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
714 if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
715 capacity = iwl_read_targ_mem(priv, base);
716 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
717 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
718 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
719 } else
720 return;
721
722 if (num_wraps == priv->event_log.num_wraps) {
723 iwl_print_cont_event_trace(priv,
724 base, priv->event_log.next_entry,
725 next_entry - priv->event_log.next_entry,
726 mode);
727 priv->event_log.non_wraps_count++;
728 } else {
729 if ((num_wraps - priv->event_log.num_wraps) > 1)
730 priv->event_log.wraps_more_count++;
731 else
732 priv->event_log.wraps_once_count++;
733 trace_iwlwifi_dev_ucode_wrap_event(priv,
734 num_wraps - priv->event_log.num_wraps,
735 next_entry, priv->event_log.next_entry);
736 if (next_entry < priv->event_log.next_entry) {
737 iwl_print_cont_event_trace(priv, base,
738 priv->event_log.next_entry,
739 capacity - priv->event_log.next_entry,
740 mode);
741
742 iwl_print_cont_event_trace(priv, base, 0,
743 next_entry, mode);
744 } else {
745 iwl_print_cont_event_trace(priv, base,
746 next_entry, capacity - next_entry,
747 mode);
748
749 iwl_print_cont_event_trace(priv, base, 0,
750 next_entry, mode);
751 }
752 }
753 priv->event_log.num_wraps = num_wraps;
754 priv->event_log.next_entry = next_entry;
755}
756
757/**
758 * iwl_bg_ucode_trace - Timer callback to log ucode event
759 *
760 * The timer is continually set to execute every
761 * UCODE_TRACE_PERIOD milliseconds after the last timer expired
762 * this function is to perform continuous uCode event logging operation
763 * if enabled
764 */
765static void iwl_bg_ucode_trace(unsigned long data)
766{
767 struct iwl_priv *priv = (struct iwl_priv *)data;
768
769 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
770 return;
771
772 if (priv->event_log.ucode_trace) {
773 iwl_continuous_event_trace(priv);
774 /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
775 mod_timer(&priv->ucode_trace,
776 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
777 }
778}
779
660static void iwl_rx_beacon_notif(struct iwl_priv *priv, 780static void iwl_rx_beacon_notif(struct iwl_priv *priv,
661 struct iwl_rx_mem_buffer *rxb) 781 struct iwl_rx_mem_buffer *rxb)
662{ 782{
@@ -689,12 +809,14 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
689 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 809 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
690 unsigned long status = priv->status; 810 unsigned long status = priv->status;
691 811
692 IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s\n", 812 IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
693 (flags & HW_CARD_DISABLED) ? "Kill" : "On", 813 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
694 (flags & SW_CARD_DISABLED) ? "Kill" : "On"); 814 (flags & SW_CARD_DISABLED) ? "Kill" : "On",
815 (flags & CT_CARD_DISABLED) ?
816 "Reached" : "Not reached");
695 817
696 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | 818 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
697 RF_CARD_DISABLED)) { 819 CT_CARD_DISABLED)) {
698 820
699 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, 821 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
700 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 822 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
@@ -708,10 +830,10 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
708 iwl_write_direct32(priv, HBUS_TARG_MBX_C, 830 iwl_write_direct32(priv, HBUS_TARG_MBX_C,
709 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); 831 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
710 } 832 }
711 if (flags & RF_CARD_DISABLED) 833 if (flags & CT_CARD_DISABLED)
712 iwl_tt_enter_ct_kill(priv); 834 iwl_tt_enter_ct_kill(priv);
713 } 835 }
714 if (!(flags & RF_CARD_DISABLED)) 836 if (!(flags & CT_CARD_DISABLED))
715 iwl_tt_exit_ct_kill(priv); 837 iwl_tt_exit_ct_kill(priv);
716 838
717 if (flags & HW_CARD_DISABLED) 839 if (flags & HW_CARD_DISABLED)
@@ -761,6 +883,8 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
761 priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive; 883 priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
762 priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error; 884 priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
763 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; 885 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
886 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
887 iwl_rx_spectrum_measure_notif;
764 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; 888 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
765 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = 889 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
766 iwl_rx_pm_debug_statistics_notif; 890 iwl_rx_pm_debug_statistics_notif;
@@ -774,7 +898,6 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
774 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_reply_statistics; 898 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_reply_statistics;
775 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics; 899 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
776 900
777 iwl_setup_spectrum_handlers(priv);
778 iwl_setup_rx_scan_handlers(priv); 901 iwl_setup_rx_scan_handlers(priv);
779 902
780 /* status change handler */ 903 /* status change handler */
@@ -1634,7 +1757,7 @@ static const char *desc_lookup_text[] = {
1634 "DEBUG_1", 1757 "DEBUG_1",
1635 "DEBUG_2", 1758 "DEBUG_2",
1636 "DEBUG_3", 1759 "DEBUG_3",
1637 "UNKNOWN" 1760 "ADVANCED SYSASSERT"
1638}; 1761};
1639 1762
1640static const char *desc_lookup(int i) 1763static const char *desc_lookup(int i)
@@ -1705,8 +1828,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
1705 * iwl_print_event_log - Dump error event log to syslog 1828 * iwl_print_event_log - Dump error event log to syslog
1706 * 1829 *
1707 */ 1830 */
1708static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, 1831static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1709 u32 num_events, u32 mode) 1832 u32 num_events, u32 mode,
1833 int pos, char **buf, size_t bufsz)
1710{ 1834{
1711 u32 i; 1835 u32 i;
1712 u32 base; /* SRAM byte address of event log header */ 1836 u32 base; /* SRAM byte address of event log header */
@@ -1716,7 +1840,7 @@ static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1716 unsigned long reg_flags; 1840 unsigned long reg_flags;
1717 1841
1718 if (num_events == 0) 1842 if (num_events == 0)
1719 return; 1843 return pos;
1720 if (priv->ucode_type == UCODE_INIT) 1844 if (priv->ucode_type == UCODE_INIT)
1721 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); 1845 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1722 else 1846 else
@@ -1744,27 +1868,44 @@ static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1744 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1868 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1745 if (mode == 0) { 1869 if (mode == 0) {
1746 /* data, ev */ 1870 /* data, ev */
1747 trace_iwlwifi_dev_ucode_event(priv, 0, time, ev); 1871 if (bufsz) {
1748 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev); 1872 pos += scnprintf(*buf + pos, bufsz - pos,
1873 "EVT_LOG:0x%08x:%04u\n",
1874 time, ev);
1875 } else {
1876 trace_iwlwifi_dev_ucode_event(priv, 0,
1877 time, ev);
1878 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
1879 time, ev);
1880 }
1749 } else { 1881 } else {
1750 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1882 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1751 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n", 1883 if (bufsz) {
1884 pos += scnprintf(*buf + pos, bufsz - pos,
1885 "EVT_LOGT:%010u:0x%08x:%04u\n",
1886 time, data, ev);
1887 } else {
1888 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
1752 time, data, ev); 1889 time, data, ev);
1753 trace_iwlwifi_dev_ucode_event(priv, time, data, ev); 1890 trace_iwlwifi_dev_ucode_event(priv, time,
1891 data, ev);
1892 }
1754 } 1893 }
1755 } 1894 }
1756 1895
1757 /* Allow device to power down */ 1896 /* Allow device to power down */
1758 iwl_release_nic_access(priv); 1897 iwl_release_nic_access(priv);
1759 spin_unlock_irqrestore(&priv->reg_lock, reg_flags); 1898 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
1899 return pos;
1760} 1900}
1761 1901
1762/** 1902/**
1763 * iwl_print_last_event_logs - Dump the newest # of event log to syslog 1903 * iwl_print_last_event_logs - Dump the newest # of event log to syslog
1764 */ 1904 */
1765static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity, 1905static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1766 u32 num_wraps, u32 next_entry, 1906 u32 num_wraps, u32 next_entry,
1767 u32 size, u32 mode) 1907 u32 size, u32 mode,
1908 int pos, char **buf, size_t bufsz)
1768{ 1909{
1769 /* 1910 /*
1770 * display the newest DEFAULT_LOG_ENTRIES entries 1911 * display the newest DEFAULT_LOG_ENTRIES entries
@@ -1772,21 +1913,26 @@ static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1772 */ 1913 */
1773 if (num_wraps) { 1914 if (num_wraps) {
1774 if (next_entry < size) { 1915 if (next_entry < size) {
1775 iwl_print_event_log(priv, 1916 pos = iwl_print_event_log(priv,
1776 capacity - (size - next_entry), 1917 capacity - (size - next_entry),
1777 size - next_entry, mode); 1918 size - next_entry, mode,
1778 iwl_print_event_log(priv, 0, 1919 pos, buf, bufsz);
1779 next_entry, mode); 1920 pos = iwl_print_event_log(priv, 0,
1921 next_entry, mode,
1922 pos, buf, bufsz);
1780 } else 1923 } else
1781 iwl_print_event_log(priv, next_entry - size, 1924 pos = iwl_print_event_log(priv, next_entry - size,
1782 size, mode); 1925 size, mode, pos, buf, bufsz);
1783 } else { 1926 } else {
1784 if (next_entry < size) 1927 if (next_entry < size) {
1785 iwl_print_event_log(priv, 0, next_entry, mode); 1928 pos = iwl_print_event_log(priv, 0, next_entry,
1786 else 1929 mode, pos, buf, bufsz);
1787 iwl_print_event_log(priv, next_entry - size, 1930 } else {
1788 size, mode); 1931 pos = iwl_print_event_log(priv, next_entry - size,
1932 size, mode, pos, buf, bufsz);
1933 }
1789 } 1934 }
1935 return pos;
1790} 1936}
1791 1937
1792/* For sanity check only. Actual size is determined by uCode, typ. 512 */ 1938/* For sanity check only. Actual size is determined by uCode, typ. 512 */
@@ -1794,7 +1940,8 @@ static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1794 1940
1795#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20) 1941#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
1796 1942
1797void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log) 1943int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1944 char **buf, bool display)
1798{ 1945{
1799 u32 base; /* SRAM byte address of event log header */ 1946 u32 base; /* SRAM byte address of event log header */
1800 u32 capacity; /* event log capacity in # entries */ 1947 u32 capacity; /* event log capacity in # entries */
@@ -1802,6 +1949,8 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1802 u32 num_wraps; /* # times uCode wrapped to top of log */ 1949 u32 num_wraps; /* # times uCode wrapped to top of log */
1803 u32 next_entry; /* index of next entry to be written by uCode */ 1950 u32 next_entry; /* index of next entry to be written by uCode */
1804 u32 size; /* # entries that we'll print */ 1951 u32 size; /* # entries that we'll print */
1952 int pos = 0;
1953 size_t bufsz = 0;
1805 1954
1806 if (priv->ucode_type == UCODE_INIT) 1955 if (priv->ucode_type == UCODE_INIT)
1807 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); 1956 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
@@ -1812,7 +1961,7 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1812 IWL_ERR(priv, 1961 IWL_ERR(priv,
1813 "Invalid event log pointer 0x%08X for %s uCode\n", 1962 "Invalid event log pointer 0x%08X for %s uCode\n",
1814 base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT"); 1963 base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
1815 return; 1964 return -EINVAL;
1816 } 1965 }
1817 1966
1818 /* event log header */ 1967 /* event log header */
@@ -1838,7 +1987,7 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1838 /* bail out if nothing in log */ 1987 /* bail out if nothing in log */
1839 if (size == 0) { 1988 if (size == 0) {
1840 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n"); 1989 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1841 return; 1990 return pos;
1842 } 1991 }
1843 1992
1844#ifdef CONFIG_IWLWIFI_DEBUG 1993#ifdef CONFIG_IWLWIFI_DEBUG
@@ -1853,6 +2002,15 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1853 size); 2002 size);
1854 2003
1855#ifdef CONFIG_IWLWIFI_DEBUG 2004#ifdef CONFIG_IWLWIFI_DEBUG
2005 if (display) {
2006 if (full_log)
2007 bufsz = capacity * 48;
2008 else
2009 bufsz = size * 48;
2010 *buf = kmalloc(bufsz, GFP_KERNEL);
2011 if (!*buf)
2012 return -ENOMEM;
2013 }
1856 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) { 2014 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
1857 /* 2015 /*
1858 * if uCode has wrapped back to top of log, 2016 * if uCode has wrapped back to top of log,
@@ -1860,17 +2018,22 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1860 * i.e the next one that uCode would fill. 2018 * i.e the next one that uCode would fill.
1861 */ 2019 */
1862 if (num_wraps) 2020 if (num_wraps)
1863 iwl_print_event_log(priv, next_entry, 2021 pos = iwl_print_event_log(priv, next_entry,
1864 capacity - next_entry, mode); 2022 capacity - next_entry, mode,
2023 pos, buf, bufsz);
1865 /* (then/else) start at top of log */ 2024 /* (then/else) start at top of log */
1866 iwl_print_event_log(priv, 0, next_entry, mode); 2025 pos = iwl_print_event_log(priv, 0,
2026 next_entry, mode, pos, buf, bufsz);
1867 } else 2027 } else
1868 iwl_print_last_event_logs(priv, capacity, num_wraps, 2028 pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
1869 next_entry, size, mode); 2029 next_entry, size, mode,
2030 pos, buf, bufsz);
1870#else 2031#else
1871 iwl_print_last_event_logs(priv, capacity, num_wraps, 2032 pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
1872 next_entry, size, mode); 2033 next_entry, size, mode,
2034 pos, buf, bufsz);
1873#endif 2035#endif
2036 return pos;
1874} 2037}
1875 2038
1876/** 2039/**
@@ -2276,18 +2439,6 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
2276 return; 2439 return;
2277} 2440}
2278 2441
2279static void iwl_bg_up(struct work_struct *data)
2280{
2281 struct iwl_priv *priv = container_of(data, struct iwl_priv, up);
2282
2283 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2284 return;
2285
2286 mutex_lock(&priv->mutex);
2287 __iwl_up(priv);
2288 mutex_unlock(&priv->mutex);
2289}
2290
2291static void iwl_bg_restart(struct work_struct *data) 2442static void iwl_bg_restart(struct work_struct *data)
2292{ 2443{
2293 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); 2444 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
@@ -2304,7 +2455,13 @@ static void iwl_bg_restart(struct work_struct *data)
2304 ieee80211_restart_hw(priv->hw); 2455 ieee80211_restart_hw(priv->hw);
2305 } else { 2456 } else {
2306 iwl_down(priv); 2457 iwl_down(priv);
2307 queue_work(priv->workqueue, &priv->up); 2458
2459 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2460 return;
2461
2462 mutex_lock(&priv->mutex);
2463 __iwl_up(priv);
2464 mutex_unlock(&priv->mutex);
2308 } 2465 }
2309} 2466}
2310 2467
@@ -2440,7 +2597,7 @@ void iwl_post_associate(struct iwl_priv *priv)
2440 * Not a mac80211 entry point function, but it fits in with all the 2597 * Not a mac80211 entry point function, but it fits in with all the
2441 * other mac80211 functions grouped here. 2598 * other mac80211 functions grouped here.
2442 */ 2599 */
2443static int iwl_setup_mac(struct iwl_priv *priv) 2600static int iwl_mac_setup_register(struct iwl_priv *priv)
2444{ 2601{
2445 int ret; 2602 int ret;
2446 struct ieee80211_hw *hw = priv->hw; 2603 struct ieee80211_hw *hw = priv->hw;
@@ -2456,6 +2613,10 @@ static int iwl_setup_mac(struct iwl_priv *priv)
2456 hw->flags |= IEEE80211_HW_SUPPORTS_PS | 2613 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
2457 IEEE80211_HW_SUPPORTS_DYNAMIC_PS; 2614 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
2458 2615
2616 if (priv->cfg->sku & IWL_SKU_N)
2617 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
2618 IEEE80211_HW_SUPPORTS_STATIC_SMPS;
2619
2459 hw->sta_data_size = sizeof(struct iwl_station_priv); 2620 hw->sta_data_size = sizeof(struct iwl_station_priv);
2460 hw->wiphy->interface_modes = 2621 hw->wiphy->interface_modes =
2461 BIT(NL80211_IFTYPE_STATION) | 2622 BIT(NL80211_IFTYPE_STATION) |
@@ -2470,7 +2631,7 @@ static int iwl_setup_mac(struct iwl_priv *priv)
2470 */ 2631 */
2471 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 2632 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
2472 2633
2473 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; 2634 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX + 1;
2474 /* we create the 802.11 header and a zero-length SSID element */ 2635 /* we create the 802.11 header and a zero-length SSID element */
2475 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; 2636 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
2476 2637
@@ -2668,14 +2829,18 @@ void iwl_config_ap(struct iwl_priv *priv)
2668} 2829}
2669 2830
2670static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw, 2831static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
2671 struct ieee80211_key_conf *keyconf, const u8 *addr, 2832 struct ieee80211_vif *vif,
2672 u32 iv32, u16 *phase1key) 2833 struct ieee80211_key_conf *keyconf,
2834 struct ieee80211_sta *sta,
2835 u32 iv32, u16 *phase1key)
2673{ 2836{
2674 2837
2675 struct iwl_priv *priv = hw->priv; 2838 struct iwl_priv *priv = hw->priv;
2676 IWL_DEBUG_MAC80211(priv, "enter\n"); 2839 IWL_DEBUG_MAC80211(priv, "enter\n");
2677 2840
2678 iwl_update_tkip_key(priv, keyconf, addr, iv32, phase1key); 2841 iwl_update_tkip_key(priv, keyconf,
2842 sta ? sta->addr : iwl_bcast_addr,
2843 iv32, phase1key);
2679 2844
2680 IWL_DEBUG_MAC80211(priv, "leave\n"); 2845 IWL_DEBUG_MAC80211(priv, "leave\n");
2681} 2846}
@@ -2784,6 +2949,9 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
2784 return 0; 2949 return 0;
2785 else 2950 else
2786 return ret; 2951 return ret;
2952 case IEEE80211_AMPDU_TX_OPERATIONAL:
2953 /* do nothing */
2954 return -EOPNOTSUPP;
2787 default: 2955 default:
2788 IWL_DEBUG_HT(priv, "unknown\n"); 2956 IWL_DEBUG_HT(priv, "unknown\n");
2789 return -EINVAL; 2957 return -EINVAL;
@@ -2833,6 +3001,8 @@ static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
2833 break; 3001 break;
2834 case STA_NOTIFY_AWAKE: 3002 case STA_NOTIFY_AWAKE:
2835 WARN_ON(!sta_priv->client); 3003 WARN_ON(!sta_priv->client);
3004 if (!sta_priv->asleep)
3005 break;
2836 sta_priv->asleep = false; 3006 sta_priv->asleep = false;
2837 sta_id = iwl_find_station(priv, sta->addr); 3007 sta_id = iwl_find_station(priv, sta->addr);
2838 if (sta_id != IWL_INVALID_STATION) 3008 if (sta_id != IWL_INVALID_STATION)
@@ -3109,7 +3279,6 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
3109 3279
3110 init_waitqueue_head(&priv->wait_command_queue); 3280 init_waitqueue_head(&priv->wait_command_queue);
3111 3281
3112 INIT_WORK(&priv->up, iwl_bg_up);
3113 INIT_WORK(&priv->restart, iwl_bg_restart); 3282 INIT_WORK(&priv->restart, iwl_bg_restart);
3114 INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish); 3283 INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
3115 INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); 3284 INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
@@ -3126,6 +3295,10 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
3126 priv->statistics_periodic.data = (unsigned long)priv; 3295 priv->statistics_periodic.data = (unsigned long)priv;
3127 priv->statistics_periodic.function = iwl_bg_statistics_periodic; 3296 priv->statistics_periodic.function = iwl_bg_statistics_periodic;
3128 3297
3298 init_timer(&priv->ucode_trace);
3299 priv->ucode_trace.data = (unsigned long)priv;
3300 priv->ucode_trace.function = iwl_bg_ucode_trace;
3301
3129 if (!priv->cfg->use_isr_legacy) 3302 if (!priv->cfg->use_isr_legacy)
3130 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 3303 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3131 iwl_irq_tasklet, (unsigned long)priv); 3304 iwl_irq_tasklet, (unsigned long)priv);
@@ -3144,6 +3317,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
3144 cancel_delayed_work(&priv->alive_start); 3317 cancel_delayed_work(&priv->alive_start);
3145 cancel_work_sync(&priv->beacon_update); 3318 cancel_work_sync(&priv->beacon_update);
3146 del_timer_sync(&priv->statistics_periodic); 3319 del_timer_sync(&priv->statistics_periodic);
3320 del_timer_sync(&priv->ucode_trace);
3147} 3321}
3148 3322
3149static void iwl_init_hw_rates(struct iwl_priv *priv, 3323static void iwl_init_hw_rates(struct iwl_priv *priv,
@@ -3179,6 +3353,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
3179 INIT_LIST_HEAD(&priv->free_frames); 3353 INIT_LIST_HEAD(&priv->free_frames);
3180 3354
3181 mutex_init(&priv->mutex); 3355 mutex_init(&priv->mutex);
3356 mutex_init(&priv->sync_cmd_mutex);
3182 3357
3183 /* Clear the driver's (not device's) station table */ 3358 /* Clear the driver's (not device's) station table */
3184 iwl_clear_stations_table(priv); 3359 iwl_clear_stations_table(priv);
@@ -3188,6 +3363,14 @@ static int iwl_init_drv(struct iwl_priv *priv)
3188 priv->band = IEEE80211_BAND_2GHZ; 3363 priv->band = IEEE80211_BAND_2GHZ;
3189 3364
3190 priv->iw_mode = NL80211_IFTYPE_STATION; 3365 priv->iw_mode = NL80211_IFTYPE_STATION;
3366 priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
3367 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
3368
3369 /* initialize force reset */
3370 priv->force_reset[IWL_RF_RESET].reset_duration =
3371 IWL_DELAY_NEXT_FORCE_RF_RESET;
3372 priv->force_reset[IWL_FW_RESET].reset_duration =
3373 IWL_DELAY_NEXT_FORCE_FW_RELOAD;
3191 3374
3192 /* Choose which receivers/antennas to use */ 3375 /* Choose which receivers/antennas to use */
3193 if (priv->cfg->ops->hcmd->set_rxon_chain) 3376 if (priv->cfg->ops->hcmd->set_rxon_chain)
@@ -3264,7 +3447,6 @@ static struct ieee80211_ops iwl_hw_ops = {
3264 .set_key = iwl_mac_set_key, 3447 .set_key = iwl_mac_set_key,
3265 .update_tkip_key = iwl_mac_update_tkip_key, 3448 .update_tkip_key = iwl_mac_update_tkip_key,
3266 .get_stats = iwl_mac_get_stats, 3449 .get_stats = iwl_mac_get_stats,
3267 .get_tx_stats = iwl_mac_get_tx_stats,
3268 .conf_tx = iwl_mac_conf_tx, 3450 .conf_tx = iwl_mac_conf_tx,
3269 .reset_tsf = iwl_mac_reset_tsf, 3451 .reset_tsf = iwl_mac_reset_tsf,
3270 .bss_info_changed = iwl_bss_info_changed, 3452 .bss_info_changed = iwl_bss_info_changed,
@@ -3365,6 +3547,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3365 */ 3547 */
3366 spin_lock_init(&priv->reg_lock); 3548 spin_lock_init(&priv->reg_lock);
3367 spin_lock_init(&priv->lock); 3549 spin_lock_init(&priv->lock);
3550
3551 /*
3552 * stop and reset the on-board processor just in case it is in a
3553 * strange state ... like being left stranded by a primary kernel
3554 * and this is now the kdump kernel trying to start up
3555 */
3556 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
3557
3368 iwl_hw_detect(priv); 3558 iwl_hw_detect(priv);
3369 IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n", 3559 IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n",
3370 priv->cfg->name, priv->hw_rev); 3560 priv->cfg->name, priv->hw_rev);
@@ -3439,9 +3629,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3439 iwl_setup_deferred_work(priv); 3629 iwl_setup_deferred_work(priv);
3440 iwl_setup_rx_handlers(priv); 3630 iwl_setup_rx_handlers(priv);
3441 3631
3442 /********************************** 3632 /*********************************************
3443 * 8. Setup and register mac80211 3633 * 8. Enable interrupts and read RFKILL state
3444 **********************************/ 3634 *********************************************/
3445 3635
3446 /* enable interrupts if needed: hw bug w/a */ 3636 /* enable interrupts if needed: hw bug w/a */
3447 pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd); 3637 pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
@@ -3452,14 +3642,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3452 3642
3453 iwl_enable_interrupts(priv); 3643 iwl_enable_interrupts(priv);
3454 3644
3455 err = iwl_setup_mac(priv);
3456 if (err)
3457 goto out_remove_sysfs;
3458
3459 err = iwl_dbgfs_register(priv, DRV_NAME);
3460 if (err)
3461 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
3462
3463 /* If platform's RF_KILL switch is NOT set to KILL */ 3645 /* If platform's RF_KILL switch is NOT set to KILL */
3464 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) 3646 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
3465 clear_bit(STATUS_RF_KILL_HW, &priv->status); 3647 clear_bit(STATUS_RF_KILL_HW, &priv->status);
@@ -3471,6 +3653,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3471 3653
3472 iwl_power_initialize(priv); 3654 iwl_power_initialize(priv);
3473 iwl_tt_initialize(priv); 3655 iwl_tt_initialize(priv);
3656
3657 /**************************************************
3658 * 9. Setup and register with mac80211 and debugfs
3659 **************************************************/
3660 err = iwl_mac_setup_register(priv);
3661 if (err)
3662 goto out_remove_sysfs;
3663
3664 err = iwl_dbgfs_register(priv, DRV_NAME);
3665 if (err)
3666 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
3667
3474 return 0; 3668 return 0;
3475 3669
3476 out_remove_sysfs: 3670 out_remove_sysfs:
@@ -3589,7 +3783,7 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
3589 *****************************************************************************/ 3783 *****************************************************************************/
3590 3784
3591/* Hardware specific file defines the PCI IDs table for that hardware module */ 3785/* Hardware specific file defines the PCI IDs table for that hardware module */
3592static struct pci_device_id iwl_hw_card_ids[] = { 3786static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
3593#ifdef CONFIG_IWL4965 3787#ifdef CONFIG_IWL4965
3594 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)}, 3788 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
3595 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)}, 3789 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index 95a57b36a7ea..845831ac053e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -414,7 +414,6 @@ static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
414/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ 414/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
415static int iwl_sensitivity_write(struct iwl_priv *priv) 415static int iwl_sensitivity_write(struct iwl_priv *priv)
416{ 416{
417 int ret = 0;
418 struct iwl_sensitivity_cmd cmd ; 417 struct iwl_sensitivity_cmd cmd ;
419 struct iwl_sensitivity_data *data = NULL; 418 struct iwl_sensitivity_data *data = NULL;
420 struct iwl_host_cmd cmd_out = { 419 struct iwl_host_cmd cmd_out = {
@@ -477,11 +476,7 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
477 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), 476 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
478 sizeof(u16)*HD_TABLE_SIZE); 477 sizeof(u16)*HD_TABLE_SIZE);
479 478
480 ret = iwl_send_cmd(priv, &cmd_out); 479 return iwl_send_cmd(priv, &cmd_out);
481 if (ret)
482 IWL_ERR(priv, "SENSITIVITY_CMD failed\n");
483
484 return ret;
485} 480}
486 481
487void iwl_init_sensitivity(struct iwl_priv *priv) 482void iwl_init_sensitivity(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.h b/drivers/net/wireless/iwlwifi/iwl-calib.h
index b6cef989a796..2b7b1df83ba0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index e91507531923..6383d9f8c9b3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -120,7 +120,6 @@ enum {
120 CALIBRATION_COMPLETE_NOTIFICATION = 0x67, 120 CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
121 121
122 /* 802.11h related */ 122 /* 802.11h related */
123 RADAR_NOTIFICATION = 0x70, /* not used */
124 REPLY_QUIET_CMD = 0x71, /* not used */ 123 REPLY_QUIET_CMD = 0x71, /* not used */
125 REPLY_CHANNEL_SWITCH = 0x72, 124 REPLY_CHANNEL_SWITCH = 0x72,
126 CHANNEL_SWITCH_NOTIFICATION = 0x73, 125 CHANNEL_SWITCH_NOTIFICATION = 0x73,
@@ -2248,10 +2247,22 @@ struct iwl_link_quality_cmd {
2248 __le32 reserved2; 2247 __le32 reserved2;
2249} __attribute__ ((packed)); 2248} __attribute__ ((packed));
2250 2249
2250/*
2251 * BT configuration enable flags:
2252 * bit 0 - 1: BT channel announcement enabled
2253 * 0: disable
2254 * bit 1 - 1: priority of BT device enabled
2255 * 0: disable
2256 * bit 2 - 1: BT 2 wire support enabled
2257 * 0: disable
2258 */
2259#define BT_COEX_DISABLE (0x0)
2260#define BT_ENABLE_CHANNEL_ANNOUNCE BIT(0)
2261#define BT_ENABLE_PRIORITY BIT(1)
2262#define BT_ENABLE_2_WIRE BIT(2)
2263
2251#define BT_COEX_DISABLE (0x0) 2264#define BT_COEX_DISABLE (0x0)
2252#define BT_COEX_MODE_2W (0x1) 2265#define BT_COEX_ENABLE (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)
2253#define BT_COEX_MODE_3W (0x2)
2254#define BT_COEX_MODE_4W (0x3)
2255 2266
2256#define BT_LEAD_TIME_MIN (0x0) 2267#define BT_LEAD_TIME_MIN (0x0)
2257#define BT_LEAD_TIME_DEF (0x1E) 2268#define BT_LEAD_TIME_DEF (0x1E)
@@ -2510,7 +2521,7 @@ struct iwl_card_state_notif {
2510 2521
2511#define HW_CARD_DISABLED 0x01 2522#define HW_CARD_DISABLED 0x01
2512#define SW_CARD_DISABLED 0x02 2523#define SW_CARD_DISABLED 0x02
2513#define RF_CARD_DISABLED 0x04 2524#define CT_CARD_DISABLED 0x04
2514#define RXON_CARD_DISABLED 0x10 2525#define RXON_CARD_DISABLED 0x10
2515 2526
2516struct iwl_ct_kill_config { 2527struct iwl_ct_kill_config {
@@ -2612,6 +2623,7 @@ struct iwl_ssid_ie {
2612#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF) 2623#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
2613#define IWL_GOOD_CRC_TH cpu_to_le16(1) 2624#define IWL_GOOD_CRC_TH cpu_to_le16(1)
2614#define IWL_MAX_SCAN_SIZE 1024 2625#define IWL_MAX_SCAN_SIZE 1024
2626#define IWL_MAX_CMD_SIZE 4096
2615#define IWL_MAX_PROBE_REQUEST 200 2627#define IWL_MAX_PROBE_REQUEST 200
2616 2628
2617/* 2629/*
@@ -2984,7 +2996,7 @@ struct statistics_rx_ht_phy {
2984 __le32 agg_crc32_good; 2996 __le32 agg_crc32_good;
2985 __le32 agg_mpdu_cnt; 2997 __le32 agg_mpdu_cnt;
2986 __le32 agg_cnt; 2998 __le32 agg_cnt;
2987 __le32 reserved2; 2999 __le32 unsupport_mcs;
2988} __attribute__ ((packed)); 3000} __attribute__ ((packed));
2989 3001
2990#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1) 3002#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
@@ -3087,8 +3099,8 @@ struct statistics_div {
3087} __attribute__ ((packed)); 3099} __attribute__ ((packed));
3088 3100
3089struct statistics_general { 3101struct statistics_general {
3090 __le32 temperature; 3102 __le32 temperature; /* radio temperature */
3091 __le32 temperature_m; 3103 __le32 temperature_m; /* for 5000 and up, this is radio voltage */
3092 struct statistics_dbg dbg; 3104 struct statistics_dbg dbg;
3093 __le32 sleep_time; 3105 __le32 sleep_time;
3094 __le32 slots_out; 3106 __le32 slots_out;
@@ -3096,7 +3108,12 @@ struct statistics_general {
3096 __le32 ttl_timestamp; 3108 __le32 ttl_timestamp;
3097 struct statistics_div div; 3109 struct statistics_div div;
3098 __le32 rx_enable_counter; 3110 __le32 rx_enable_counter;
3099 __le32 reserved1; 3111 /*
3112 * num_of_sos_states:
3113 * count the number of times we have to re-tune
3114 * in order to get out of bad PHY status
3115 */
3116 __le32 num_of_sos_states;
3100 __le32 reserved2; 3117 __le32 reserved2;
3101 __le32 reserved3; 3118 __le32 reserved3;
3102} __attribute__ ((packed)); 3119} __attribute__ ((packed));
@@ -3161,13 +3178,30 @@ struct iwl_notif_statistics {
3161 3178
3162/* 3179/*
3163 * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command) 3180 * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
3181 *
3182 * uCode send MISSED_BEACONS_NOTIFICATION to driver when detect beacon missed
3183 * in regardless of how many missed beacons, which mean when driver receive the
3184 * notification, inside the command, it can find all the beacons information
3185 * which include number of total missed beacons, number of consecutive missed
3186 * beacons, number of beacons received and number of beacons expected to
3187 * receive.
3188 *
3189 * If uCode detected consecutive_missed_beacons > 5, it will reset the radio
3190 * in order to bring the radio/PHY back to working state; which has no relation
3191 * to when driver will perform sensitivity calibration.
3192 *
3193 * Driver should set it own missed_beacon_threshold to decide when to perform
3194 * sensitivity calibration based on number of consecutive missed beacons in
3195 * order to improve overall performance, especially in noisy environment.
3196 *
3164 */ 3197 */
3165/* if ucode missed CONSECUTIVE_MISSED_BCONS_TH beacons in a row, 3198
3166 * then this notification will be sent. */ 3199#define IWL_MISSED_BEACON_THRESHOLD_MIN (1)
3167#define CONSECUTIVE_MISSED_BCONS_TH 20 3200#define IWL_MISSED_BEACON_THRESHOLD_DEF (5)
3201#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF
3168 3202
3169struct iwl_missed_beacon_notif { 3203struct iwl_missed_beacon_notif {
3170 __le32 consequtive_missed_beacons; 3204 __le32 consecutive_missed_beacons;
3171 __le32 total_missed_becons; 3205 __le32 total_missed_becons;
3172 __le32 num_expected_beacons; 3206 __le32 num_expected_beacons;
3173 __le32 num_recvd_beacons; 3207 __le32 num_recvd_beacons;
@@ -3437,11 +3471,7 @@ enum {
3437 IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7, 3471 IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
3438 IWL_PHY_CALIBRATE_DC_CMD = 8, 3472 IWL_PHY_CALIBRATE_DC_CMD = 8,
3439 IWL_PHY_CALIBRATE_LO_CMD = 9, 3473 IWL_PHY_CALIBRATE_LO_CMD = 9,
3440 IWL_PHY_CALIBRATE_RX_BB_CMD = 10,
3441 IWL_PHY_CALIBRATE_TX_IQ_CMD = 11, 3474 IWL_PHY_CALIBRATE_TX_IQ_CMD = 11,
3442 IWL_PHY_CALIBRATE_RX_IQ_CMD = 12,
3443 IWL_PHY_CALIBRATION_NOISE_CMD = 13,
3444 IWL_PHY_CALIBRATE_AGC_TABLE_CMD = 14,
3445 IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15, 3475 IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15,
3446 IWL_PHY_CALIBRATE_BASE_BAND_CMD = 16, 3476 IWL_PHY_CALIBRATE_BASE_BAND_CMD = 16,
3447 IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17, 3477 IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17,
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index f36f804804fc..112149e9b31e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -47,6 +47,26 @@ MODULE_VERSION(IWLWIFI_VERSION);
47MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 47MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
48MODULE_LICENSE("GPL"); 48MODULE_LICENSE("GPL");
49 49
50/*
51 * set bt_coex_active to true, uCode will do kill/defer
52 * every time the priority line is asserted (BT is sending signals on the
53 * priority line in the PCIx).
54 * set bt_coex_active to false, uCode will ignore the BT activity and
55 * perform the normal operation
56 *
57 * User might experience transmit issue on some platform due to WiFi/BT
58 * co-exist problem. The possible behaviors are:
59 * Able to scan and finding all the available AP
60 * Not able to associate with any AP
61 * On those platforms, WiFi communication can be restored by set
62 * "bt_coex_active" module parameter to "false"
63 *
64 * default: bt_coex_active = true (BT_COEX_ENABLE)
65 */
66static bool bt_coex_active = true;
67module_param(bt_coex_active, bool, S_IRUGO);
68MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist\n");
69
50static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = { 70static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
51 {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP, 71 {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
52 0, COEX_UNASSOC_IDLE_FLAGS}, 72 0, COEX_UNASSOC_IDLE_FLAGS},
@@ -257,8 +277,8 @@ int iwl_hw_nic_init(struct iwl_priv *priv)
257 spin_lock_irqsave(&priv->lock, flags); 277 spin_lock_irqsave(&priv->lock, flags);
258 priv->cfg->ops->lib->apm_ops.init(priv); 278 priv->cfg->ops->lib->apm_ops.init(priv);
259 279
260 /* Set interrupt coalescing timer to 512 usecs */ 280 /* Set interrupt coalescing calibration timer to default (512 usecs) */
261 iwl_write8(priv, CSR_INT_COALESCING, 512 / 32); 281 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
262 282
263 spin_unlock_irqrestore(&priv->lock, flags); 283 spin_unlock_irqrestore(&priv->lock, flags);
264 284
@@ -450,8 +470,6 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
450 if (priv->cfg->ht_greenfield_support) 470 if (priv->cfg->ht_greenfield_support)
451 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD; 471 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
452 ht_info->cap |= IEEE80211_HT_CAP_SGI_20; 472 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
453 ht_info->cap |= (IEEE80211_HT_CAP_SM_PS &
454 (priv->cfg->sm_ps_mode << 2));
455 max_bit_rate = MAX_BIT_RATE_20_MHZ; 473 max_bit_rate = MAX_BIT_RATE_20_MHZ;
456 if (priv->hw_params.ht40_channel & BIT(band)) { 474 if (priv->hw_params.ht40_channel & BIT(band)) {
457 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 475 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
@@ -636,7 +654,7 @@ EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag);
636 654
637static bool is_single_rx_stream(struct iwl_priv *priv) 655static bool is_single_rx_stream(struct iwl_priv *priv)
638{ 656{
639 return !priv->current_ht_config.is_ht || 657 return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
640 priv->current_ht_config.single_chain_sufficient; 658 priv->current_ht_config.single_chain_sufficient;
641} 659}
642 660
@@ -1003,28 +1021,18 @@ static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
1003 */ 1021 */
1004static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt) 1022static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
1005{ 1023{
1006 int idle_cnt = active_cnt; 1024 /* # Rx chains when idling, depending on SMPS mode */
1007 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status); 1025 switch (priv->current_ht_config.smps) {
1008 1026 case IEEE80211_SMPS_STATIC:
1009 /* # Rx chains when idling and maybe trying to save power */ 1027 case IEEE80211_SMPS_DYNAMIC:
1010 switch (priv->cfg->sm_ps_mode) { 1028 return IWL_NUM_IDLE_CHAINS_SINGLE;
1011 case WLAN_HT_CAP_SM_PS_STATIC: 1029 case IEEE80211_SMPS_OFF:
1012 idle_cnt = (is_cam) ? active_cnt : IWL_NUM_IDLE_CHAINS_SINGLE; 1030 return active_cnt;
1013 break;
1014 case WLAN_HT_CAP_SM_PS_DYNAMIC:
1015 idle_cnt = (is_cam) ? IWL_NUM_IDLE_CHAINS_DUAL :
1016 IWL_NUM_IDLE_CHAINS_SINGLE;
1017 break;
1018 case WLAN_HT_CAP_SM_PS_DISABLED:
1019 break;
1020 case WLAN_HT_CAP_SM_PS_INVALID:
1021 default: 1031 default:
1022 IWL_ERR(priv, "invalid sm_ps mode %u\n", 1032 WARN(1, "invalid SMPS mode %d",
1023 priv->cfg->sm_ps_mode); 1033 priv->current_ht_config.smps);
1024 WARN_ON(1); 1034 return active_cnt;
1025 break;
1026 } 1035 }
1027 return idle_cnt;
1028} 1036}
1029 1037
1030/* up to 4 chains */ 1038/* up to 4 chains */
@@ -1363,7 +1371,11 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
1363 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 1371 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1364 1372
1365 priv->cfg->ops->lib->dump_nic_error_log(priv); 1373 priv->cfg->ops->lib->dump_nic_error_log(priv);
1366 priv->cfg->ops->lib->dump_nic_event_log(priv, false); 1374 if (priv->cfg->ops->lib->dump_csr)
1375 priv->cfg->ops->lib->dump_csr(priv);
1376 if (priv->cfg->ops->lib->dump_fh)
1377 priv->cfg->ops->lib->dump_fh(priv, NULL, false);
1378 priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
1367#ifdef CONFIG_IWLWIFI_DEBUG 1379#ifdef CONFIG_IWLWIFI_DEBUG
1368 if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) 1380 if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
1369 iwl_print_rx_config_cmd(priv); 1381 iwl_print_rx_config_cmd(priv);
@@ -1658,9 +1670,9 @@ EXPORT_SYMBOL(iwl_set_tx_power);
1658void iwl_free_isr_ict(struct iwl_priv *priv) 1670void iwl_free_isr_ict(struct iwl_priv *priv)
1659{ 1671{
1660 if (priv->ict_tbl_vir) { 1672 if (priv->ict_tbl_vir) {
1661 pci_free_consistent(priv->pci_dev, (sizeof(u32) * ICT_COUNT) + 1673 dma_free_coherent(&priv->pci_dev->dev,
1662 PAGE_SIZE, priv->ict_tbl_vir, 1674 (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
1663 priv->ict_tbl_dma); 1675 priv->ict_tbl_vir, priv->ict_tbl_dma);
1664 priv->ict_tbl_vir = NULL; 1676 priv->ict_tbl_vir = NULL;
1665 } 1677 }
1666} 1678}
@@ -1676,9 +1688,9 @@ int iwl_alloc_isr_ict(struct iwl_priv *priv)
1676 if (priv->cfg->use_isr_legacy) 1688 if (priv->cfg->use_isr_legacy)
1677 return 0; 1689 return 0;
1678 /* allocate shrared data table */ 1690 /* allocate shrared data table */
1679 priv->ict_tbl_vir = pci_alloc_consistent(priv->pci_dev, (sizeof(u32) * 1691 priv->ict_tbl_vir = dma_alloc_coherent(&priv->pci_dev->dev,
1680 ICT_COUNT) + PAGE_SIZE, 1692 (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
1681 &priv->ict_tbl_dma); 1693 &priv->ict_tbl_dma, GFP_KERNEL);
1682 if (!priv->ict_tbl_vir) 1694 if (!priv->ict_tbl_vir)
1683 return -ENOMEM; 1695 return -ENOMEM;
1684 1696
@@ -1813,6 +1825,16 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
1813 if (val == 0xffffffff) 1825 if (val == 0xffffffff)
1814 val = 0; 1826 val = 0;
1815 1827
1828 /*
1829 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
1830 * (bit 15 before shifting it to 31) to clear when using interrupt
1831 * coalescing. fortunately, bits 18 and 19 stay set when this happens
1832 * so we use them to decide on the real state of the Rx bit.
1833 * In order words, bit 15 is set if bit 18 or bit 19 are set.
1834 */
1835 if (val & 0xC0000)
1836 val |= 0x8000;
1837
1816 inta = (0xff & val) | ((0xff00 & val) << 16); 1838 inta = (0xff & val) | ((0xff00 & val) << 16);
1817 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n", 1839 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
1818 inta, inta_mask, val); 1840 inta, inta_mask, val);
@@ -1975,13 +1997,20 @@ EXPORT_SYMBOL(iwl_isr_legacy);
1975int iwl_send_bt_config(struct iwl_priv *priv) 1997int iwl_send_bt_config(struct iwl_priv *priv)
1976{ 1998{
1977 struct iwl_bt_cmd bt_cmd = { 1999 struct iwl_bt_cmd bt_cmd = {
1978 .flags = BT_COEX_MODE_4W,
1979 .lead_time = BT_LEAD_TIME_DEF, 2000 .lead_time = BT_LEAD_TIME_DEF,
1980 .max_kill = BT_MAX_KILL_DEF, 2001 .max_kill = BT_MAX_KILL_DEF,
1981 .kill_ack_mask = 0, 2002 .kill_ack_mask = 0,
1982 .kill_cts_mask = 0, 2003 .kill_cts_mask = 0,
1983 }; 2004 };
1984 2005
2006 if (!bt_coex_active)
2007 bt_cmd.flags = BT_COEX_DISABLE;
2008 else
2009 bt_cmd.flags = BT_COEX_ENABLE;
2010
2011 IWL_DEBUG_INFO(priv, "BT coex %s\n",
2012 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
2013
1985 return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, 2014 return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1986 sizeof(struct iwl_bt_cmd), &bt_cmd); 2015 sizeof(struct iwl_bt_cmd), &bt_cmd);
1987} 2016}
@@ -2599,44 +2628,43 @@ int iwl_set_mode(struct iwl_priv *priv, int mode)
2599EXPORT_SYMBOL(iwl_set_mode); 2628EXPORT_SYMBOL(iwl_set_mode);
2600 2629
2601int iwl_mac_add_interface(struct ieee80211_hw *hw, 2630int iwl_mac_add_interface(struct ieee80211_hw *hw,
2602 struct ieee80211_if_init_conf *conf) 2631 struct ieee80211_vif *vif)
2603{ 2632{
2604 struct iwl_priv *priv = hw->priv; 2633 struct iwl_priv *priv = hw->priv;
2605 unsigned long flags; 2634 int err = 0;
2606 2635
2607 IWL_DEBUG_MAC80211(priv, "enter: type %d\n", conf->type); 2636 IWL_DEBUG_MAC80211(priv, "enter: type %d\n", vif->type);
2637
2638 mutex_lock(&priv->mutex);
2608 2639
2609 if (priv->vif) { 2640 if (priv->vif) {
2610 IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n"); 2641 IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
2611 return -EOPNOTSUPP; 2642 err = -EOPNOTSUPP;
2643 goto out;
2612 } 2644 }
2613 2645
2614 spin_lock_irqsave(&priv->lock, flags); 2646 priv->vif = vif;
2615 priv->vif = conf->vif; 2647 priv->iw_mode = vif->type;
2616 priv->iw_mode = conf->type;
2617
2618 spin_unlock_irqrestore(&priv->lock, flags);
2619
2620 mutex_lock(&priv->mutex);
2621 2648
2622 if (conf->mac_addr) { 2649 if (vif->addr) {
2623 IWL_DEBUG_MAC80211(priv, "Set %pM\n", conf->mac_addr); 2650 IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
2624 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN); 2651 memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
2625 } 2652 }
2626 2653
2627 if (iwl_set_mode(priv, conf->type) == -EAGAIN) 2654 if (iwl_set_mode(priv, vif->type) == -EAGAIN)
2628 /* we are not ready, will run again when ready */ 2655 /* we are not ready, will run again when ready */
2629 set_bit(STATUS_MODE_PENDING, &priv->status); 2656 set_bit(STATUS_MODE_PENDING, &priv->status);
2630 2657
2658 out:
2631 mutex_unlock(&priv->mutex); 2659 mutex_unlock(&priv->mutex);
2632 2660
2633 IWL_DEBUG_MAC80211(priv, "leave\n"); 2661 IWL_DEBUG_MAC80211(priv, "leave\n");
2634 return 0; 2662 return err;
2635} 2663}
2636EXPORT_SYMBOL(iwl_mac_add_interface); 2664EXPORT_SYMBOL(iwl_mac_add_interface);
2637 2665
2638void iwl_mac_remove_interface(struct ieee80211_hw *hw, 2666void iwl_mac_remove_interface(struct ieee80211_hw *hw,
2639 struct ieee80211_if_init_conf *conf) 2667 struct ieee80211_vif *vif)
2640{ 2668{
2641 struct iwl_priv *priv = hw->priv; 2669 struct iwl_priv *priv = hw->priv;
2642 2670
@@ -2649,7 +2677,7 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
2649 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2677 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2650 iwlcore_commit_rxon(priv); 2678 iwlcore_commit_rxon(priv);
2651 } 2679 }
2652 if (priv->vif == conf->vif) { 2680 if (priv->vif == vif) {
2653 priv->vif = NULL; 2681 priv->vif = NULL;
2654 memset(priv->bssid, 0, ETH_ALEN); 2682 memset(priv->bssid, 0, ETH_ALEN);
2655 } 2683 }
@@ -2689,6 +2717,21 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2689 IWL_DEBUG_MAC80211(priv, "leave - scanning\n"); 2717 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
2690 } 2718 }
2691 2719
2720 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
2721 IEEE80211_CONF_CHANGE_CHANNEL)) {
2722 /* mac80211 uses static for non-HT which is what we want */
2723 priv->current_ht_config.smps = conf->smps_mode;
2724
2725 /*
2726 * Recalculate chain counts.
2727 *
2728 * If monitor mode is enabled then mac80211 will
2729 * set up the SM PS mode to OFF if an HT channel is
2730 * configured.
2731 */
2732 if (priv->cfg->ops->hcmd->set_rxon_chain)
2733 priv->cfg->ops->hcmd->set_rxon_chain(priv);
2734 }
2692 2735
2693 /* during scanning mac80211 will delay channel setting until 2736 /* during scanning mac80211 will delay channel setting until
2694 * scan finish with changed = 0 2737 * scan finish with changed = 0
@@ -2786,10 +2829,6 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2786 iwl_set_tx_power(priv, conf->power_level, false); 2829 iwl_set_tx_power(priv, conf->power_level, false);
2787 } 2830 }
2788 2831
2789 /* call to ensure that 4965 rx_chain is set properly in monitor mode */
2790 if (priv->cfg->ops->hcmd->set_rxon_chain)
2791 priv->cfg->ops->hcmd->set_rxon_chain(priv);
2792
2793 if (!iwl_is_ready(priv)) { 2832 if (!iwl_is_ready(priv)) {
2794 IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); 2833 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2795 goto out; 2834 goto out;
@@ -2812,42 +2851,6 @@ out:
2812} 2851}
2813EXPORT_SYMBOL(iwl_mac_config); 2852EXPORT_SYMBOL(iwl_mac_config);
2814 2853
2815int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
2816 struct ieee80211_tx_queue_stats *stats)
2817{
2818 struct iwl_priv *priv = hw->priv;
2819 int i, avail;
2820 struct iwl_tx_queue *txq;
2821 struct iwl_queue *q;
2822 unsigned long flags;
2823
2824 IWL_DEBUG_MAC80211(priv, "enter\n");
2825
2826 if (!iwl_is_ready_rf(priv)) {
2827 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
2828 return -EIO;
2829 }
2830
2831 spin_lock_irqsave(&priv->lock, flags);
2832
2833 for (i = 0; i < AC_NUM; i++) {
2834 txq = &priv->txq[i];
2835 q = &txq->q;
2836 avail = iwl_queue_space(q);
2837
2838 stats[i].len = q->n_window - avail;
2839 stats[i].limit = q->n_window - q->high_mark;
2840 stats[i].count = q->n_window;
2841
2842 }
2843 spin_unlock_irqrestore(&priv->lock, flags);
2844
2845 IWL_DEBUG_MAC80211(priv, "leave\n");
2846
2847 return 0;
2848}
2849EXPORT_SYMBOL(iwl_mac_get_tx_stats);
2850
2851void iwl_mac_reset_tsf(struct ieee80211_hw *hw) 2854void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
2852{ 2855{
2853 struct iwl_priv *priv = hw->priv; 2856 struct iwl_priv *priv = hw->priv;
@@ -3197,6 +3200,207 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
3197EXPORT_SYMBOL(iwl_update_stats); 3200EXPORT_SYMBOL(iwl_update_stats);
3198#endif 3201#endif
3199 3202
3203const static char *get_csr_string(int cmd)
3204{
3205 switch (cmd) {
3206 IWL_CMD(CSR_HW_IF_CONFIG_REG);
3207 IWL_CMD(CSR_INT_COALESCING);
3208 IWL_CMD(CSR_INT);
3209 IWL_CMD(CSR_INT_MASK);
3210 IWL_CMD(CSR_FH_INT_STATUS);
3211 IWL_CMD(CSR_GPIO_IN);
3212 IWL_CMD(CSR_RESET);
3213 IWL_CMD(CSR_GP_CNTRL);
3214 IWL_CMD(CSR_HW_REV);
3215 IWL_CMD(CSR_EEPROM_REG);
3216 IWL_CMD(CSR_EEPROM_GP);
3217 IWL_CMD(CSR_OTP_GP_REG);
3218 IWL_CMD(CSR_GIO_REG);
3219 IWL_CMD(CSR_GP_UCODE_REG);
3220 IWL_CMD(CSR_GP_DRIVER_REG);
3221 IWL_CMD(CSR_UCODE_DRV_GP1);
3222 IWL_CMD(CSR_UCODE_DRV_GP2);
3223 IWL_CMD(CSR_LED_REG);
3224 IWL_CMD(CSR_DRAM_INT_TBL_REG);
3225 IWL_CMD(CSR_GIO_CHICKEN_BITS);
3226 IWL_CMD(CSR_ANA_PLL_CFG);
3227 IWL_CMD(CSR_HW_REV_WA_REG);
3228 IWL_CMD(CSR_DBG_HPET_MEM_REG);
3229 default:
3230 return "UNKNOWN";
3231
3232 }
3233}
3234
3235void iwl_dump_csr(struct iwl_priv *priv)
3236{
3237 int i;
3238 u32 csr_tbl[] = {
3239 CSR_HW_IF_CONFIG_REG,
3240 CSR_INT_COALESCING,
3241 CSR_INT,
3242 CSR_INT_MASK,
3243 CSR_FH_INT_STATUS,
3244 CSR_GPIO_IN,
3245 CSR_RESET,
3246 CSR_GP_CNTRL,
3247 CSR_HW_REV,
3248 CSR_EEPROM_REG,
3249 CSR_EEPROM_GP,
3250 CSR_OTP_GP_REG,
3251 CSR_GIO_REG,
3252 CSR_GP_UCODE_REG,
3253 CSR_GP_DRIVER_REG,
3254 CSR_UCODE_DRV_GP1,
3255 CSR_UCODE_DRV_GP2,
3256 CSR_LED_REG,
3257 CSR_DRAM_INT_TBL_REG,
3258 CSR_GIO_CHICKEN_BITS,
3259 CSR_ANA_PLL_CFG,
3260 CSR_HW_REV_WA_REG,
3261 CSR_DBG_HPET_MEM_REG
3262 };
3263 IWL_ERR(priv, "CSR values:\n");
3264 IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
3265 "CSR_INT_PERIODIC_REG)\n");
3266 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
3267 IWL_ERR(priv, " %25s: 0X%08x\n",
3268 get_csr_string(csr_tbl[i]),
3269 iwl_read32(priv, csr_tbl[i]));
3270 }
3271}
3272EXPORT_SYMBOL(iwl_dump_csr);
3273
3274const static char *get_fh_string(int cmd)
3275{
3276 switch (cmd) {
3277 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
3278 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
3279 IWL_CMD(FH_RSCSR_CHNL0_WPTR);
3280 IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
3281 IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
3282 IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
3283 IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
3284 IWL_CMD(FH_TSSR_TX_STATUS_REG);
3285 IWL_CMD(FH_TSSR_TX_ERROR_REG);
3286 default:
3287 return "UNKNOWN";
3288
3289 }
3290}
3291
3292int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
3293{
3294 int i;
3295#ifdef CONFIG_IWLWIFI_DEBUG
3296 int pos = 0;
3297 size_t bufsz = 0;
3298#endif
3299 u32 fh_tbl[] = {
3300 FH_RSCSR_CHNL0_STTS_WPTR_REG,
3301 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
3302 FH_RSCSR_CHNL0_WPTR,
3303 FH_MEM_RCSR_CHNL0_CONFIG_REG,
3304 FH_MEM_RSSR_SHARED_CTRL_REG,
3305 FH_MEM_RSSR_RX_STATUS_REG,
3306 FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
3307 FH_TSSR_TX_STATUS_REG,
3308 FH_TSSR_TX_ERROR_REG
3309 };
3310#ifdef CONFIG_IWLWIFI_DEBUG
3311 if (display) {
3312 bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
3313 *buf = kmalloc(bufsz, GFP_KERNEL);
3314 if (!*buf)
3315 return -ENOMEM;
3316 pos += scnprintf(*buf + pos, bufsz - pos,
3317 "FH register values:\n");
3318 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
3319 pos += scnprintf(*buf + pos, bufsz - pos,
3320 " %34s: 0X%08x\n",
3321 get_fh_string(fh_tbl[i]),
3322 iwl_read_direct32(priv, fh_tbl[i]));
3323 }
3324 return pos;
3325 }
3326#endif
3327 IWL_ERR(priv, "FH register values:\n");
3328 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
3329 IWL_ERR(priv, " %34s: 0X%08x\n",
3330 get_fh_string(fh_tbl[i]),
3331 iwl_read_direct32(priv, fh_tbl[i]));
3332 }
3333 return 0;
3334}
3335EXPORT_SYMBOL(iwl_dump_fh);
3336
3337static void iwl_force_rf_reset(struct iwl_priv *priv)
3338{
3339 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3340 return;
3341
3342 if (!iwl_is_associated(priv)) {
3343 IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
3344 return;
3345 }
3346 /*
3347 * There is no easy and better way to force reset the radio,
3348 * the only known method is switching channel which will force to
3349 * reset and tune the radio.
3350 * Use internal short scan (single channel) operation to should
3351 * achieve this objective.
3352 * Driver should reset the radio when number of consecutive missed
3353 * beacon, or any other uCode error condition detected.
3354 */
3355 IWL_DEBUG_INFO(priv, "perform radio reset.\n");
3356 iwl_internal_short_hw_scan(priv);
3357 return;
3358}
3359
3360
3361int iwl_force_reset(struct iwl_priv *priv, int mode)
3362{
3363 struct iwl_force_reset *force_reset;
3364
3365 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3366 return -EINVAL;
3367
3368 if (mode >= IWL_MAX_FORCE_RESET) {
3369 IWL_DEBUG_INFO(priv, "invalid reset request.\n");
3370 return -EINVAL;
3371 }
3372 force_reset = &priv->force_reset[mode];
3373 force_reset->reset_request_count++;
3374 if (force_reset->last_force_reset_jiffies &&
3375 time_after(force_reset->last_force_reset_jiffies +
3376 force_reset->reset_duration, jiffies)) {
3377 IWL_DEBUG_INFO(priv, "force reset rejected\n");
3378 force_reset->reset_reject_count++;
3379 return -EAGAIN;
3380 }
3381 force_reset->reset_success_count++;
3382 force_reset->last_force_reset_jiffies = jiffies;
3383 IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
3384 switch (mode) {
3385 case IWL_RF_RESET:
3386 iwl_force_rf_reset(priv);
3387 break;
3388 case IWL_FW_RESET:
3389 IWL_ERR(priv, "On demand firmware reload\n");
3390 /* Set the FW error flag -- cleared on iwl_down */
3391 set_bit(STATUS_FW_ERROR, &priv->status);
3392 wake_up_interruptible(&priv->wait_command_queue);
3393 /*
3394 * Keep the restart process from trying to send host
3395 * commands by clearing the INIT status bit
3396 */
3397 clear_bit(STATUS_READY, &priv->status);
3398 queue_work(priv->workqueue, &priv->restart);
3399 break;
3400 }
3401 return 0;
3402}
3403
3200#ifdef CONFIG_PM 3404#ifdef CONFIG_PM
3201 3405
3202int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state) 3406int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index b69e972671b2..4ef7739f9e8e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -63,8 +63,6 @@
63#ifndef __iwl_core_h__ 63#ifndef __iwl_core_h__
64#define __iwl_core_h__ 64#define __iwl_core_h__
65 65
66#include <generated/utsrelease.h>
67
68/************************ 66/************************
69 * forward declarations * 67 * forward declarations *
70 ************************/ 68 ************************/
@@ -72,8 +70,8 @@ struct iwl_host_cmd;
72struct iwl_cmd; 70struct iwl_cmd;
73 71
74 72
75#define IWLWIFI_VERSION UTS_RELEASE "-k" 73#define IWLWIFI_VERSION "in-tree:"
76#define DRV_COPYRIGHT "Copyright(c) 2003-2009 Intel Corporation" 74#define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation"
77#define DRV_AUTHOR "<ilw@linux.intel.com>" 75#define DRV_AUTHOR "<ilw@linux.intel.com>"
78 76
79#define IWL_PCI_DEVICE(dev, subdev, cfg) \ 77#define IWL_PCI_DEVICE(dev, subdev, cfg) \
@@ -119,6 +117,7 @@ struct iwl_apm_ops {
119struct iwl_temp_ops { 117struct iwl_temp_ops {
120 void (*temperature)(struct iwl_priv *priv); 118 void (*temperature)(struct iwl_priv *priv);
121 void (*set_ct_kill)(struct iwl_priv *priv); 119 void (*set_ct_kill)(struct iwl_priv *priv);
120 void (*set_calib_version)(struct iwl_priv *priv);
122}; 121};
123 122
124struct iwl_ucode_ops { 123struct iwl_ucode_ops {
@@ -169,8 +168,11 @@ struct iwl_lib_ops {
169 int (*is_valid_rtc_data_addr)(u32 addr); 168 int (*is_valid_rtc_data_addr)(u32 addr);
170 /* 1st ucode load */ 169 /* 1st ucode load */
171 int (*load_ucode)(struct iwl_priv *priv); 170 int (*load_ucode)(struct iwl_priv *priv);
172 void (*dump_nic_event_log)(struct iwl_priv *priv, bool full_log); 171 int (*dump_nic_event_log)(struct iwl_priv *priv,
172 bool full_log, char **buf, bool display);
173 void (*dump_nic_error_log)(struct iwl_priv *priv); 173 void (*dump_nic_error_log)(struct iwl_priv *priv);
174 void (*dump_csr)(struct iwl_priv *priv);
175 int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
174 int (*set_channel_switch)(struct iwl_priv *priv, u16 channel); 176 int (*set_channel_switch)(struct iwl_priv *priv, u16 channel);
175 /* power management */ 177 /* power management */
176 struct iwl_apm_ops apm_ops; 178 struct iwl_apm_ops apm_ops;
@@ -187,6 +189,8 @@ struct iwl_lib_ops {
187 189
188 /* temperature */ 190 /* temperature */
189 struct iwl_temp_ops temp_ops; 191 struct iwl_temp_ops temp_ops;
192 /* station management */
193 void (*add_bcast_station)(struct iwl_priv *priv);
190}; 194};
191 195
192struct iwl_led_ops { 196struct iwl_led_ops {
@@ -230,8 +234,9 @@ struct iwl_mod_params {
230 * @chain_noise_num_beacons: number of beacons used to compute chain noise 234 * @chain_noise_num_beacons: number of beacons used to compute chain noise
231 * @adv_thermal_throttle: support advance thermal throttle 235 * @adv_thermal_throttle: support advance thermal throttle
232 * @support_ct_kill_exit: support ct kill exit condition 236 * @support_ct_kill_exit: support ct kill exit condition
233 * @sm_ps_mode: spatial multiplexing power save mode
234 * @support_wimax_coexist: support wimax/wifi co-exist 237 * @support_wimax_coexist: support wimax/wifi co-exist
238 * @plcp_delta_threshold: plcp error rate threshold used to trigger
239 * radio tuning when there is a high receiving plcp error rate
235 * 240 *
236 * We enable the driver to be backward compatible wrt API version. The 241 * We enable the driver to be backward compatible wrt API version. The
237 * driver specifies which APIs it supports (with @ucode_api_max being the 242 * driver specifies which APIs it supports (with @ucode_api_max being the
@@ -287,8 +292,9 @@ struct iwl_cfg {
287 const bool supports_idle; 292 const bool supports_idle;
288 bool adv_thermal_throttle; 293 bool adv_thermal_throttle;
289 bool support_ct_kill_exit; 294 bool support_ct_kill_exit;
290 u8 sm_ps_mode;
291 const bool support_wimax_coexist; 295 const bool support_wimax_coexist;
296 u8 plcp_delta_threshold;
297 s32 chain_noise_scale;
292}; 298};
293 299
294/*************************** 300/***************************
@@ -332,13 +338,11 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb);
332int iwl_commit_rxon(struct iwl_priv *priv); 338int iwl_commit_rxon(struct iwl_priv *priv);
333int iwl_set_mode(struct iwl_priv *priv, int mode); 339int iwl_set_mode(struct iwl_priv *priv, int mode);
334int iwl_mac_add_interface(struct ieee80211_hw *hw, 340int iwl_mac_add_interface(struct ieee80211_hw *hw,
335 struct ieee80211_if_init_conf *conf); 341 struct ieee80211_vif *vif);
336void iwl_mac_remove_interface(struct ieee80211_hw *hw, 342void iwl_mac_remove_interface(struct ieee80211_hw *hw,
337 struct ieee80211_if_init_conf *conf); 343 struct ieee80211_vif *vif);
338int iwl_mac_config(struct ieee80211_hw *hw, u32 changed); 344int iwl_mac_config(struct ieee80211_hw *hw, u32 changed);
339void iwl_config_ap(struct iwl_priv *priv); 345void iwl_config_ap(struct iwl_priv *priv);
340int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
341 struct ieee80211_tx_queue_stats *stats);
342void iwl_mac_reset_tsf(struct ieee80211_hw *hw); 346void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
343int iwl_alloc_txq_mem(struct iwl_priv *priv); 347int iwl_alloc_txq_mem(struct iwl_priv *priv);
344void iwl_free_txq_mem(struct iwl_priv *priv); 348void iwl_free_txq_mem(struct iwl_priv *priv);
@@ -411,13 +415,13 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
411void iwl_cmd_queue_free(struct iwl_priv *priv); 415void iwl_cmd_queue_free(struct iwl_priv *priv);
412int iwl_rx_queue_alloc(struct iwl_priv *priv); 416int iwl_rx_queue_alloc(struct iwl_priv *priv);
413void iwl_rx_handle(struct iwl_priv *priv); 417void iwl_rx_handle(struct iwl_priv *priv);
414int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, 418void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
415 struct iwl_rx_queue *q); 419 struct iwl_rx_queue *q);
416void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 420void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
417void iwl_rx_replenish(struct iwl_priv *priv); 421void iwl_rx_replenish(struct iwl_priv *priv);
418void iwl_rx_replenish_now(struct iwl_priv *priv); 422void iwl_rx_replenish_now(struct iwl_priv *priv);
419int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 423int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
420int iwl_rx_queue_restock(struct iwl_priv *priv); 424void iwl_rx_queue_restock(struct iwl_priv *priv);
421int iwl_rx_queue_space(const struct iwl_rx_queue *q); 425int iwl_rx_queue_space(const struct iwl_rx_queue *q);
422void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority); 426void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority);
423void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); 427void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
@@ -425,6 +429,8 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
425/* Handlers */ 429/* Handlers */
426void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, 430void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
427 struct iwl_rx_mem_buffer *rxb); 431 struct iwl_rx_mem_buffer *rxb);
432void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
433 struct iwl_rx_mem_buffer *rxb);
428void iwl_rx_statistics(struct iwl_priv *priv, 434void iwl_rx_statistics(struct iwl_priv *priv,
429 struct iwl_rx_mem_buffer *rxb); 435 struct iwl_rx_mem_buffer *rxb);
430void iwl_reply_statistics(struct iwl_priv *priv, 436void iwl_reply_statistics(struct iwl_priv *priv,
@@ -445,9 +451,9 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
445void iwl_hw_txq_ctx_free(struct iwl_priv *priv); 451void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
446int iwl_hw_tx_queue_init(struct iwl_priv *priv, 452int iwl_hw_tx_queue_init(struct iwl_priv *priv,
447 struct iwl_tx_queue *txq); 453 struct iwl_tx_queue *txq);
448int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
449void iwl_free_tfds_in_queue(struct iwl_priv *priv, 454void iwl_free_tfds_in_queue(struct iwl_priv *priv,
450 int sta_id, int tid, int freed); 455 int sta_id, int tid, int freed);
456void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
451int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, 457int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
452 int slots_num, u32 txq_id); 458 int slots_num, u32 txq_id);
453void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); 459void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
@@ -497,6 +503,8 @@ void iwl_init_scan_params(struct iwl_priv *priv);
497int iwl_scan_cancel(struct iwl_priv *priv); 503int iwl_scan_cancel(struct iwl_priv *priv);
498int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); 504int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
499int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req); 505int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req);
506int iwl_internal_short_hw_scan(struct iwl_priv *priv);
507int iwl_force_reset(struct iwl_priv *priv, int mode);
500u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, 508u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
501 const u8 *ie, int ie_len, int left); 509 const u8 *ie, int ie_len, int left);
502void iwl_setup_rx_scan_handlers(struct iwl_priv *priv); 510void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
@@ -527,14 +535,6 @@ int iwl_send_calib_results(struct iwl_priv *priv);
527int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len); 535int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
528void iwl_calib_free_results(struct iwl_priv *priv); 536void iwl_calib_free_results(struct iwl_priv *priv);
529 537
530/*******************************************************************************
531 * Spectrum Measureemtns in iwl-spectrum.c
532 ******************************************************************************/
533#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
534void iwl_setup_spectrum_handlers(struct iwl_priv *priv);
535#else
536static inline void iwl_setup_spectrum_handlers(struct iwl_priv *priv) {}
537#endif
538/***************************************************** 538/*****************************************************
539 * S e n d i n g H o s t C o m m a n d s * 539 * S e n d i n g H o s t C o m m a n d s *
540 *****************************************************/ 540 *****************************************************/
@@ -583,7 +583,10 @@ int iwl_pci_resume(struct pci_dev *pdev);
583* Error Handling Debugging 583* Error Handling Debugging
584******************************************************/ 584******************************************************/
585void iwl_dump_nic_error_log(struct iwl_priv *priv); 585void iwl_dump_nic_error_log(struct iwl_priv *priv);
586void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log); 586int iwl_dump_nic_event_log(struct iwl_priv *priv,
587 bool full_log, char **buf, bool display);
588void iwl_dump_csr(struct iwl_priv *priv);
589int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display);
587#ifdef CONFIG_IWLWIFI_DEBUG 590#ifdef CONFIG_IWLWIFI_DEBUG
588void iwl_print_rx_config_cmd(struct iwl_priv *priv); 591void iwl_print_rx_config_cmd(struct iwl_priv *priv);
589#else 592#else
@@ -603,7 +606,7 @@ void iwlcore_free_geos(struct iwl_priv *priv);
603/*************** DRIVER STATUS FUNCTIONS *****/ 606/*************** DRIVER STATUS FUNCTIONS *****/
604 607
605#define STATUS_HCMD_ACTIVE 0 /* host command in progress */ 608#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
606#define STATUS_HCMD_SYNC_ACTIVE 1 /* sync host command in progress */ 609/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
607#define STATUS_INT_ENABLED 2 610#define STATUS_INT_ENABLED 2
608#define STATUS_RF_KILL_HW 3 611#define STATUS_RF_KILL_HW 3
609#define STATUS_CT_KILL 4 612#define STATUS_CT_KILL 4
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 1ec8cb4d5eae..808b7146bead 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -369,7 +369,7 @@
369#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_3x3_HYB (0x00000000) 369#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_3x3_HYB (0x00000000)
370#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_HYB (0x00000001) 370#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_HYB (0x00000001)
371#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA (0x00000002) 371#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA (0x00000002)
372 372#define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6 (0x00000004)
373 373
374/* GIO Chicken Bits (PCI Express bus link power management) */ 374/* GIO Chicken Bits (PCI Express bus link power management) */
375#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000) 375#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index d61293ab67c9..1c7b53d511c7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project. 5 * Portions of this file are derived from the ipw3945 project.
6 * 6 *
@@ -67,57 +67,6 @@ do { \
67 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ 67 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
68} while (0) 68} while (0)
69 69
70#ifdef CONFIG_IWLWIFI_DEBUGFS
71struct iwl_debugfs {
72 const char *name;
73 struct dentry *dir_drv;
74 struct dentry *dir_data;
75 struct dentry *dir_debug;
76 struct dentry *dir_rf;
77 struct dir_data_files {
78 struct dentry *file_sram;
79 struct dentry *file_nvm;
80 struct dentry *file_stations;
81 struct dentry *file_log_event;
82 struct dentry *file_channels;
83 struct dentry *file_status;
84 struct dentry *file_interrupt;
85 struct dentry *file_qos;
86 struct dentry *file_thermal_throttling;
87 struct dentry *file_led;
88 struct dentry *file_disable_ht40;
89 struct dentry *file_sleep_level_override;
90 struct dentry *file_current_sleep_command;
91 } dbgfs_data_files;
92 struct dir_rf_files {
93 struct dentry *file_disable_sensitivity;
94 struct dentry *file_disable_chain_noise;
95 struct dentry *file_disable_tx_power;
96 } dbgfs_rf_files;
97 struct dir_debug_files {
98 struct dentry *file_rx_statistics;
99 struct dentry *file_tx_statistics;
100 struct dentry *file_traffic_log;
101 struct dentry *file_rx_queue;
102 struct dentry *file_tx_queue;
103 struct dentry *file_ucode_rx_stats;
104 struct dentry *file_ucode_tx_stats;
105 struct dentry *file_ucode_general_stats;
106 struct dentry *file_sensitivity;
107 struct dentry *file_chain_noise;
108 struct dentry *file_tx_power;
109 struct dentry *file_power_save_status;
110 struct dentry *file_clear_ucode_statistics;
111 struct dentry *file_clear_traffic_statistics;
112 } dbgfs_debug_files;
113 u32 sram_offset;
114 u32 sram_len;
115};
116
117int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
118void iwl_dbgfs_unregister(struct iwl_priv *priv);
119#endif
120
121#else 70#else
122#define IWL_DEBUG(__priv, level, fmt, args...) 71#define IWL_DEBUG(__priv, level, fmt, args...)
123#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) 72#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
@@ -126,9 +75,10 @@ static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
126{} 75{}
127#endif /* CONFIG_IWLWIFI_DEBUG */ 76#endif /* CONFIG_IWLWIFI_DEBUG */
128 77
129 78#ifdef CONFIG_IWLWIFI_DEBUGFS
130 79int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
131#ifndef CONFIG_IWLWIFI_DEBUGFS 80void iwl_dbgfs_unregister(struct iwl_priv *priv);
81#else
132static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) 82static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
133{ 83{
134 return 0; 84 return 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 21e0f6699daf..7bf44f146799 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -41,43 +41,28 @@
41#include "iwl-calib.h" 41#include "iwl-calib.h"
42 42
43/* create and remove of files */ 43/* create and remove of files */
44#define DEBUGFS_ADD_DIR(name, parent) do { \ 44#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
45 dbgfs->dir_##name = debugfs_create_dir(#name, parent); \ 45 if (!debugfs_create_file(#name, mode, parent, priv, \
46 if (!(dbgfs->dir_##name)) \ 46 &iwl_dbgfs_##name##_ops)) \
47 goto err; \ 47 goto err; \
48} while (0) 48} while (0)
49 49
50#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ 50#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
51 dbgfs->dbgfs_##parent##_files.file_##name = \ 51 struct dentry *__tmp; \
52 debugfs_create_file(#name, mode, \ 52 __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
53 dbgfs->dir_##parent, priv, \ 53 parent, ptr); \
54 &iwl_dbgfs_##name##_ops); \ 54 if (IS_ERR(__tmp) || !__tmp) \
55 if (!(dbgfs->dbgfs_##parent##_files.file_##name)) \ 55 goto err; \
56 goto err; \
57} while (0) 56} while (0)
58 57
59#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \ 58#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
60 dbgfs->dbgfs_##parent##_files.file_##name = \ 59 struct dentry *__tmp; \
61 debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \ 60 __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
62 dbgfs->dir_##parent, ptr); \ 61 parent, ptr); \
63 if (IS_ERR(dbgfs->dbgfs_##parent##_files.file_##name) \ 62 if (IS_ERR(__tmp) || !__tmp) \
64 || !dbgfs->dbgfs_##parent##_files.file_##name) \ 63 goto err; \
65 goto err; \
66} while (0) 64} while (0)
67 65
68#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
69 dbgfs->dbgfs_##parent##_files.file_##name = \
70 debugfs_create_x32(#name, S_IRUSR, dbgfs->dir_##parent, ptr); \
71 if (IS_ERR(dbgfs->dbgfs_##parent##_files.file_##name) \
72 || !dbgfs->dbgfs_##parent##_files.file_##name) \
73 goto err; \
74} while (0)
75
76#define DEBUGFS_REMOVE(name) do { \
77 debugfs_remove(name); \
78 name = NULL; \
79} while (0);
80
81/* file operation */ 66/* file operation */
82#define DEBUGFS_READ_FUNC(name) \ 67#define DEBUGFS_READ_FUNC(name) \
83static ssize_t iwl_dbgfs_##name##_read(struct file *file, \ 68static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
@@ -125,7 +110,7 @@ static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
125 char __user *user_buf, 110 char __user *user_buf,
126 size_t count, loff_t *ppos) { 111 size_t count, loff_t *ppos) {
127 112
128 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 113 struct iwl_priv *priv = file->private_data;
129 char *buf; 114 char *buf;
130 int pos = 0; 115 int pos = 0;
131 116
@@ -184,7 +169,7 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
184 char __user *user_buf, 169 char __user *user_buf,
185 size_t count, loff_t *ppos) { 170 size_t count, loff_t *ppos) {
186 171
187 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 172 struct iwl_priv *priv = file->private_data;
188 char *buf; 173 char *buf;
189 int pos = 0; 174 int pos = 0;
190 int cnt; 175 int cnt;
@@ -232,28 +217,28 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
232 ssize_t ret; 217 ssize_t ret;
233 int i; 218 int i;
234 int pos = 0; 219 int pos = 0;
235 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 220 struct iwl_priv *priv = file->private_data;
236 size_t bufsz; 221 size_t bufsz;
237 222
238 /* default is to dump the entire data segment */ 223 /* default is to dump the entire data segment */
239 if (!priv->dbgfs->sram_offset && !priv->dbgfs->sram_len) { 224 if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
240 priv->dbgfs->sram_offset = 0x800000; 225 priv->dbgfs_sram_offset = 0x800000;
241 if (priv->ucode_type == UCODE_INIT) 226 if (priv->ucode_type == UCODE_INIT)
242 priv->dbgfs->sram_len = priv->ucode_init_data.len; 227 priv->dbgfs_sram_len = priv->ucode_init_data.len;
243 else 228 else
244 priv->dbgfs->sram_len = priv->ucode_data.len; 229 priv->dbgfs_sram_len = priv->ucode_data.len;
245 } 230 }
246 bufsz = 30 + priv->dbgfs->sram_len * sizeof(char) * 10; 231 bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
247 buf = kmalloc(bufsz, GFP_KERNEL); 232 buf = kmalloc(bufsz, GFP_KERNEL);
248 if (!buf) 233 if (!buf)
249 return -ENOMEM; 234 return -ENOMEM;
250 pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", 235 pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
251 priv->dbgfs->sram_len); 236 priv->dbgfs_sram_len);
252 pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", 237 pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
253 priv->dbgfs->sram_offset); 238 priv->dbgfs_sram_offset);
254 for (i = priv->dbgfs->sram_len; i > 0; i -= 4) { 239 for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
255 val = iwl_read_targ_mem(priv, priv->dbgfs->sram_offset + \ 240 val = iwl_read_targ_mem(priv, priv->dbgfs_sram_offset + \
256 priv->dbgfs->sram_len - i); 241 priv->dbgfs_sram_len - i);
257 if (i < 4) { 242 if (i < 4) {
258 switch (i) { 243 switch (i) {
259 case 1: 244 case 1:
@@ -293,11 +278,11 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
293 return -EFAULT; 278 return -EFAULT;
294 279
295 if (sscanf(buf, "%x,%x", &offset, &len) == 2) { 280 if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
296 priv->dbgfs->sram_offset = offset; 281 priv->dbgfs_sram_offset = offset;
297 priv->dbgfs->sram_len = len; 282 priv->dbgfs_sram_len = len;
298 } else { 283 } else {
299 priv->dbgfs->sram_offset = 0; 284 priv->dbgfs_sram_offset = 0;
300 priv->dbgfs->sram_len = 0; 285 priv->dbgfs_sram_len = 0;
301 } 286 }
302 287
303 return count; 288 return count;
@@ -306,7 +291,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
306static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, 291static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
307 size_t count, loff_t *ppos) 292 size_t count, loff_t *ppos)
308{ 293{
309 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 294 struct iwl_priv *priv = file->private_data;
310 struct iwl_station_entry *station; 295 struct iwl_station_entry *station;
311 int max_sta = priv->hw_params.max_stations; 296 int max_sta = priv->hw_params.max_stations;
312 char *buf; 297 char *buf;
@@ -376,7 +361,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
376 loff_t *ppos) 361 loff_t *ppos)
377{ 362{
378 ssize_t ret; 363 ssize_t ret;
379 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 364 struct iwl_priv *priv = file->private_data;
380 int pos = 0, ofs = 0, buf_size = 0; 365 int pos = 0, ofs = 0, buf_size = 0;
381 const u8 *ptr; 366 const u8 *ptr;
382 char *buf; 367 char *buf;
@@ -420,6 +405,24 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
420 return ret; 405 return ret;
421} 406}
422 407
408static ssize_t iwl_dbgfs_log_event_read(struct file *file,
409 char __user *user_buf,
410 size_t count, loff_t *ppos)
411{
412 struct iwl_priv *priv = file->private_data;
413 char *buf;
414 int pos = 0;
415 ssize_t ret = -ENOMEM;
416
417 ret = pos = priv->cfg->ops->lib->dump_nic_event_log(
418 priv, true, &buf, true);
419 if (buf) {
420 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
421 kfree(buf);
422 }
423 return ret;
424}
425
423static ssize_t iwl_dbgfs_log_event_write(struct file *file, 426static ssize_t iwl_dbgfs_log_event_write(struct file *file,
424 const char __user *user_buf, 427 const char __user *user_buf,
425 size_t count, loff_t *ppos) 428 size_t count, loff_t *ppos)
@@ -436,7 +439,8 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
436 if (sscanf(buf, "%d", &event_log_flag) != 1) 439 if (sscanf(buf, "%d", &event_log_flag) != 1)
437 return -EFAULT; 440 return -EFAULT;
438 if (event_log_flag == 1) 441 if (event_log_flag == 1)
439 priv->cfg->ops->lib->dump_nic_event_log(priv, true); 442 priv->cfg->ops->lib->dump_nic_event_log(priv, true,
443 NULL, false);
440 444
441 return count; 445 return count;
442} 446}
@@ -446,7 +450,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
446static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf, 450static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
447 size_t count, loff_t *ppos) 451 size_t count, loff_t *ppos)
448{ 452{
449 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 453 struct iwl_priv *priv = file->private_data;
450 struct ieee80211_channel *channels = NULL; 454 struct ieee80211_channel *channels = NULL;
451 const struct ieee80211_supported_band *supp_band = NULL; 455 const struct ieee80211_supported_band *supp_band = NULL;
452 int pos = 0, i, bufsz = PAGE_SIZE; 456 int pos = 0, i, bufsz = PAGE_SIZE;
@@ -519,15 +523,13 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
519 char __user *user_buf, 523 char __user *user_buf,
520 size_t count, loff_t *ppos) { 524 size_t count, loff_t *ppos) {
521 525
522 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 526 struct iwl_priv *priv = file->private_data;
523 char buf[512]; 527 char buf[512];
524 int pos = 0; 528 int pos = 0;
525 const size_t bufsz = sizeof(buf); 529 const size_t bufsz = sizeof(buf);
526 530
527 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n", 531 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
528 test_bit(STATUS_HCMD_ACTIVE, &priv->status)); 532 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
529 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
530 test_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status));
531 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n", 533 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
532 test_bit(STATUS_INT_ENABLED, &priv->status)); 534 test_bit(STATUS_INT_ENABLED, &priv->status));
533 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n", 535 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
@@ -567,7 +569,7 @@ static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
567 char __user *user_buf, 569 char __user *user_buf,
568 size_t count, loff_t *ppos) { 570 size_t count, loff_t *ppos) {
569 571
570 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 572 struct iwl_priv *priv = file->private_data;
571 int pos = 0; 573 int pos = 0;
572 int cnt = 0; 574 int cnt = 0;
573 char *buf; 575 char *buf;
@@ -654,7 +656,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
654static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf, 656static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
655 size_t count, loff_t *ppos) 657 size_t count, loff_t *ppos)
656{ 658{
657 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 659 struct iwl_priv *priv = file->private_data;
658 int pos = 0, i; 660 int pos = 0, i;
659 char buf[256]; 661 char buf[256];
660 const size_t bufsz = sizeof(buf); 662 const size_t bufsz = sizeof(buf);
@@ -677,7 +679,7 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
677static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf, 679static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
678 size_t count, loff_t *ppos) 680 size_t count, loff_t *ppos)
679{ 681{
680 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 682 struct iwl_priv *priv = file->private_data;
681 int pos = 0; 683 int pos = 0;
682 char buf[256]; 684 char buf[256];
683 const size_t bufsz = sizeof(buf); 685 const size_t bufsz = sizeof(buf);
@@ -703,7 +705,7 @@ static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
703 char __user *user_buf, 705 char __user *user_buf,
704 size_t count, loff_t *ppos) 706 size_t count, loff_t *ppos)
705{ 707{
706 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 708 struct iwl_priv *priv = file->private_data;
707 struct iwl_tt_mgmt *tt = &priv->thermal_throttle; 709 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
708 struct iwl_tt_restriction *restriction; 710 struct iwl_tt_restriction *restriction;
709 char buf[100]; 711 char buf[100];
@@ -763,7 +765,7 @@ static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file,
763 char __user *user_buf, 765 char __user *user_buf,
764 size_t count, loff_t *ppos) 766 size_t count, loff_t *ppos)
765{ 767{
766 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 768 struct iwl_priv *priv = file->private_data;
767 char buf[100]; 769 char buf[100];
768 int pos = 0; 770 int pos = 0;
769 const size_t bufsz = sizeof(buf); 771 const size_t bufsz = sizeof(buf);
@@ -811,7 +813,9 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
811 813
812 priv->power_data.debug_sleep_level_override = value; 814 priv->power_data.debug_sleep_level_override = value;
813 815
816 mutex_lock(&priv->mutex);
814 iwl_power_update_mode(priv, true); 817 iwl_power_update_mode(priv, true);
818 mutex_unlock(&priv->mutex);
815 819
816 return count; 820 return count;
817} 821}
@@ -820,7 +824,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_read(struct file *file,
820 char __user *user_buf, 824 char __user *user_buf,
821 size_t count, loff_t *ppos) 825 size_t count, loff_t *ppos)
822{ 826{
823 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 827 struct iwl_priv *priv = file->private_data;
824 char buf[10]; 828 char buf[10];
825 int pos, value; 829 int pos, value;
826 const size_t bufsz = sizeof(buf); 830 const size_t bufsz = sizeof(buf);
@@ -838,7 +842,7 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
838 char __user *user_buf, 842 char __user *user_buf,
839 size_t count, loff_t *ppos) 843 size_t count, loff_t *ppos)
840{ 844{
841 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 845 struct iwl_priv *priv = file->private_data;
842 char buf[200]; 846 char buf[200];
843 int pos = 0, i; 847 int pos = 0, i;
844 const size_t bufsz = sizeof(buf); 848 const size_t bufsz = sizeof(buf);
@@ -859,7 +863,7 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
859} 863}
860 864
861DEBUGFS_READ_WRITE_FILE_OPS(sram); 865DEBUGFS_READ_WRITE_FILE_OPS(sram);
862DEBUGFS_WRITE_FILE_OPS(log_event); 866DEBUGFS_READ_WRITE_FILE_OPS(log_event);
863DEBUGFS_READ_FILE_OPS(nvm); 867DEBUGFS_READ_FILE_OPS(nvm);
864DEBUGFS_READ_FILE_OPS(stations); 868DEBUGFS_READ_FILE_OPS(stations);
865DEBUGFS_READ_FILE_OPS(channels); 869DEBUGFS_READ_FILE_OPS(channels);
@@ -976,7 +980,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
976 char __user *user_buf, 980 char __user *user_buf,
977 size_t count, loff_t *ppos) { 981 size_t count, loff_t *ppos) {
978 982
979 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 983 struct iwl_priv *priv = file->private_data;
980 struct iwl_tx_queue *txq; 984 struct iwl_tx_queue *txq;
981 struct iwl_queue *q; 985 struct iwl_queue *q;
982 char *buf; 986 char *buf;
@@ -1022,7 +1026,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1022 char __user *user_buf, 1026 char __user *user_buf,
1023 size_t count, loff_t *ppos) { 1027 size_t count, loff_t *ppos) {
1024 1028
1025 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1029 struct iwl_priv *priv = file->private_data;
1026 struct iwl_rx_queue *rxq = &priv->rxq; 1030 struct iwl_rx_queue *rxq = &priv->rxq;
1027 char buf[256]; 1031 char buf[256];
1028 int pos = 0; 1032 int pos = 0;
@@ -1063,36 +1067,33 @@ static int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf,
1063 return p; 1067 return p;
1064} 1068}
1065 1069
1070static const char ucode_stats_header[] =
1071 "%-32s current acumulative delta max\n";
1072static const char ucode_stats_short_format[] =
1073 " %-30s %10u\n";
1074static const char ucode_stats_format[] =
1075 " %-30s %10u %10u %10u %10u\n";
1066 1076
1067static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file, 1077static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
1068 char __user *user_buf, 1078 char __user *user_buf,
1069 size_t count, loff_t *ppos) 1079 size_t count, loff_t *ppos)
1070{ 1080{
1071 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1081 struct iwl_priv *priv = file->private_data;
1072 int pos = 0; 1082 int pos = 0;
1073 char *buf; 1083 char *buf;
1074 int bufsz = sizeof(struct statistics_rx_phy) * 20 + 1084 int bufsz = sizeof(struct statistics_rx_phy) * 40 +
1075 sizeof(struct statistics_rx_non_phy) * 20 + 1085 sizeof(struct statistics_rx_non_phy) * 40 +
1076 sizeof(struct statistics_rx_ht_phy) * 20 + 400; 1086 sizeof(struct statistics_rx_ht_phy) * 40 + 400;
1077 ssize_t ret; 1087 ssize_t ret;
1078 struct statistics_rx_phy *ofdm, *accum_ofdm; 1088 struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
1079 struct statistics_rx_phy *cck, *accum_cck; 1089 struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
1080 struct statistics_rx_non_phy *general, *accum_general; 1090 struct statistics_rx_non_phy *general, *accum_general;
1081 struct statistics_rx_ht_phy *ht, *accum_ht; 1091 struct statistics_rx_non_phy *delta_general, *max_general;
1092 struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
1082 1093
1083 if (!iwl_is_alive(priv)) 1094 if (!iwl_is_alive(priv))
1084 return -EAGAIN; 1095 return -EAGAIN;
1085 1096
1086 /* make request to uCode to retrieve statistics information */
1087 mutex_lock(&priv->mutex);
1088 ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
1089 mutex_unlock(&priv->mutex);
1090
1091 if (ret) {
1092 IWL_ERR(priv,
1093 "Error sending statistics request: %zd\n", ret);
1094 return -EAGAIN;
1095 }
1096 buf = kzalloc(bufsz, GFP_KERNEL); 1097 buf = kzalloc(bufsz, GFP_KERNEL);
1097 if (!buf) { 1098 if (!buf) {
1098 IWL_ERR(priv, "Can not allocate Buffer\n"); 1099 IWL_ERR(priv, "Can not allocate Buffer\n");
@@ -1111,264 +1112,401 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
1111 accum_cck = &priv->accum_statistics.rx.cck; 1112 accum_cck = &priv->accum_statistics.rx.cck;
1112 accum_general = &priv->accum_statistics.rx.general; 1113 accum_general = &priv->accum_statistics.rx.general;
1113 accum_ht = &priv->accum_statistics.rx.ofdm_ht; 1114 accum_ht = &priv->accum_statistics.rx.ofdm_ht;
1115 delta_ofdm = &priv->delta_statistics.rx.ofdm;
1116 delta_cck = &priv->delta_statistics.rx.cck;
1117 delta_general = &priv->delta_statistics.rx.general;
1118 delta_ht = &priv->delta_statistics.rx.ofdm_ht;
1119 max_ofdm = &priv->max_delta.rx.ofdm;
1120 max_cck = &priv->max_delta.rx.cck;
1121 max_general = &priv->max_delta.rx.general;
1122 max_ht = &priv->max_delta.rx.ofdm_ht;
1123
1114 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); 1124 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
1115 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM:\n"); 1125 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
1116 pos += scnprintf(buf + pos, bufsz - pos, 1126 "Statistics_Rx - OFDM:");
1117 "\t\t\tcurrent\t\t\taccumulative\n"); 1127 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1118 pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt:\t\t%u\t\t\t%u\n", 1128 "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
1119 le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt); 1129 accum_ofdm->ina_cnt,
1120 pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt:\t\t%u\t\t\t%u\n", 1130 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
1121 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt); 1131 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1122 pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n", 1132 "fina_cnt:",
1123 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err); 1133 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
1124 pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n", 1134 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
1125 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err); 1135 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1126 pos += scnprintf(buf + pos, bufsz - pos, 1136 "plcp_err:",
1127 "overrun_err:\t\t%u\t\t\t%u\n", 1137 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
1138 delta_ofdm->plcp_err, max_ofdm->plcp_err);
1139 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1140 "crc32_err:",
1141 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
1142 delta_ofdm->crc32_err, max_ofdm->crc32_err);
1143 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1144 "overrun_err:",
1128 le32_to_cpu(ofdm->overrun_err), 1145 le32_to_cpu(ofdm->overrun_err),
1129 accum_ofdm->overrun_err); 1146 accum_ofdm->overrun_err,
1130 pos += scnprintf(buf + pos, bufsz - pos, 1147 delta_ofdm->overrun_err, max_ofdm->overrun_err);
1131 "early_overrun_err:\t%u\t\t\t%u\n", 1148 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1149 "early_overrun_err:",
1132 le32_to_cpu(ofdm->early_overrun_err), 1150 le32_to_cpu(ofdm->early_overrun_err),
1133 accum_ofdm->early_overrun_err); 1151 accum_ofdm->early_overrun_err,
1134 pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n", 1152 delta_ofdm->early_overrun_err,
1153 max_ofdm->early_overrun_err);
1154 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1155 "crc32_good:",
1135 le32_to_cpu(ofdm->crc32_good), 1156 le32_to_cpu(ofdm->crc32_good),
1136 accum_ofdm->crc32_good); 1157 accum_ofdm->crc32_good,
1137 pos += scnprintf(buf + pos, bufsz - pos, 1158 delta_ofdm->crc32_good, max_ofdm->crc32_good);
1138 "false_alarm_cnt:\t%u\t\t\t%u\n", 1159 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1160 "false_alarm_cnt:",
1139 le32_to_cpu(ofdm->false_alarm_cnt), 1161 le32_to_cpu(ofdm->false_alarm_cnt),
1140 accum_ofdm->false_alarm_cnt); 1162 accum_ofdm->false_alarm_cnt,
1141 pos += scnprintf(buf + pos, bufsz - pos, 1163 delta_ofdm->false_alarm_cnt,
1142 "fina_sync_err_cnt:\t%u\t\t\t%u\n", 1164 max_ofdm->false_alarm_cnt);
1165 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1166 "fina_sync_err_cnt:",
1143 le32_to_cpu(ofdm->fina_sync_err_cnt), 1167 le32_to_cpu(ofdm->fina_sync_err_cnt),
1144 accum_ofdm->fina_sync_err_cnt); 1168 accum_ofdm->fina_sync_err_cnt,
1145 pos += scnprintf(buf + pos, bufsz - pos, 1169 delta_ofdm->fina_sync_err_cnt,
1146 "sfd_timeout:\t\t%u\t\t\t%u\n", 1170 max_ofdm->fina_sync_err_cnt);
1171 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1172 "sfd_timeout:",
1147 le32_to_cpu(ofdm->sfd_timeout), 1173 le32_to_cpu(ofdm->sfd_timeout),
1148 accum_ofdm->sfd_timeout); 1174 accum_ofdm->sfd_timeout,
1149 pos += scnprintf(buf + pos, bufsz - pos, 1175 delta_ofdm->sfd_timeout,
1150 "fina_timeout:\t\t%u\t\t\t%u\n", 1176 max_ofdm->sfd_timeout);
1177 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1178 "fina_timeout:",
1151 le32_to_cpu(ofdm->fina_timeout), 1179 le32_to_cpu(ofdm->fina_timeout),
1152 accum_ofdm->fina_timeout); 1180 accum_ofdm->fina_timeout,
1153 pos += scnprintf(buf + pos, bufsz - pos, 1181 delta_ofdm->fina_timeout,
1154 "unresponded_rts:\t%u\t\t\t%u\n", 1182 max_ofdm->fina_timeout);
1183 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1184 "unresponded_rts:",
1155 le32_to_cpu(ofdm->unresponded_rts), 1185 le32_to_cpu(ofdm->unresponded_rts),
1156 accum_ofdm->unresponded_rts); 1186 accum_ofdm->unresponded_rts,
1157 pos += scnprintf(buf + pos, bufsz - pos, 1187 delta_ofdm->unresponded_rts,
1158 "rxe_frame_lmt_ovrun:\t%u\t\t\t%u\n", 1188 max_ofdm->unresponded_rts);
1189 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1190 "rxe_frame_lmt_ovrun:",
1159 le32_to_cpu(ofdm->rxe_frame_limit_overrun), 1191 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
1160 accum_ofdm->rxe_frame_limit_overrun); 1192 accum_ofdm->rxe_frame_limit_overrun,
1161 pos += scnprintf(buf + pos, bufsz - pos, 1193 delta_ofdm->rxe_frame_limit_overrun,
1162 "sent_ack_cnt:\t\t%u\t\t\t%u\n", 1194 max_ofdm->rxe_frame_limit_overrun);
1195 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1196 "sent_ack_cnt:",
1163 le32_to_cpu(ofdm->sent_ack_cnt), 1197 le32_to_cpu(ofdm->sent_ack_cnt),
1164 accum_ofdm->sent_ack_cnt); 1198 accum_ofdm->sent_ack_cnt,
1165 pos += scnprintf(buf + pos, bufsz - pos, 1199 delta_ofdm->sent_ack_cnt,
1166 "sent_cts_cnt:\t\t%u\t\t\t%u\n", 1200 max_ofdm->sent_ack_cnt);
1201 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1202 "sent_cts_cnt:",
1167 le32_to_cpu(ofdm->sent_cts_cnt), 1203 le32_to_cpu(ofdm->sent_cts_cnt),
1168 accum_ofdm->sent_cts_cnt); 1204 accum_ofdm->sent_cts_cnt,
1169 pos += scnprintf(buf + pos, bufsz - pos, 1205 delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
1170 "sent_ba_rsp_cnt:\t%u\t\t\t%u\n", 1206 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1207 "sent_ba_rsp_cnt:",
1171 le32_to_cpu(ofdm->sent_ba_rsp_cnt), 1208 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
1172 accum_ofdm->sent_ba_rsp_cnt); 1209 accum_ofdm->sent_ba_rsp_cnt,
1173 pos += scnprintf(buf + pos, bufsz - pos, 1210 delta_ofdm->sent_ba_rsp_cnt,
1174 "dsp_self_kill:\t\t%u\t\t\t%u\n", 1211 max_ofdm->sent_ba_rsp_cnt);
1212 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1213 "dsp_self_kill:",
1175 le32_to_cpu(ofdm->dsp_self_kill), 1214 le32_to_cpu(ofdm->dsp_self_kill),
1176 accum_ofdm->dsp_self_kill); 1215 accum_ofdm->dsp_self_kill,
1177 pos += scnprintf(buf + pos, bufsz - pos, 1216 delta_ofdm->dsp_self_kill,
1178 "mh_format_err:\t\t%u\t\t\t%u\n", 1217 max_ofdm->dsp_self_kill);
1218 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1219 "mh_format_err:",
1179 le32_to_cpu(ofdm->mh_format_err), 1220 le32_to_cpu(ofdm->mh_format_err),
1180 accum_ofdm->mh_format_err); 1221 accum_ofdm->mh_format_err,
1181 pos += scnprintf(buf + pos, bufsz - pos, 1222 delta_ofdm->mh_format_err,
1182 "re_acq_main_rssi_sum:\t%u\t\t\t%u\n", 1223 max_ofdm->mh_format_err);
1224 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1225 "re_acq_main_rssi_sum:",
1183 le32_to_cpu(ofdm->re_acq_main_rssi_sum), 1226 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
1184 accum_ofdm->re_acq_main_rssi_sum); 1227 accum_ofdm->re_acq_main_rssi_sum,
1185 1228 delta_ofdm->re_acq_main_rssi_sum,
1186 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - CCK:\n"); 1229 max_ofdm->re_acq_main_rssi_sum);
1187 pos += scnprintf(buf + pos, bufsz - pos, 1230
1188 "\t\t\tcurrent\t\t\taccumulative\n"); 1231 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
1189 pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt:\t\t%u\t\t\t%u\n", 1232 "Statistics_Rx - CCK:");
1190 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt); 1233 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1191 pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt:\t\t%u\t\t\t%u\n", 1234 "ina_cnt:",
1192 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt); 1235 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
1193 pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n", 1236 delta_cck->ina_cnt, max_cck->ina_cnt);
1194 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err); 1237 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1195 pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n", 1238 "fina_cnt:",
1196 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err); 1239 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
1197 pos += scnprintf(buf + pos, bufsz - pos, 1240 delta_cck->fina_cnt, max_cck->fina_cnt);
1198 "overrun_err:\t\t%u\t\t\t%u\n", 1241 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1242 "plcp_err:",
1243 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
1244 delta_cck->plcp_err, max_cck->plcp_err);
1245 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1246 "crc32_err:",
1247 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
1248 delta_cck->crc32_err, max_cck->crc32_err);
1249 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1250 "overrun_err:",
1199 le32_to_cpu(cck->overrun_err), 1251 le32_to_cpu(cck->overrun_err),
1200 accum_cck->overrun_err); 1252 accum_cck->overrun_err,
1201 pos += scnprintf(buf + pos, bufsz - pos, 1253 delta_cck->overrun_err, max_cck->overrun_err);
1202 "early_overrun_err:\t%u\t\t\t%u\n", 1254 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1255 "early_overrun_err:",
1203 le32_to_cpu(cck->early_overrun_err), 1256 le32_to_cpu(cck->early_overrun_err),
1204 accum_cck->early_overrun_err); 1257 accum_cck->early_overrun_err,
1205 pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n", 1258 delta_cck->early_overrun_err,
1206 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good); 1259 max_cck->early_overrun_err);
1207 pos += scnprintf(buf + pos, bufsz - pos, 1260 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1208 "false_alarm_cnt:\t%u\t\t\t%u\n", 1261 "crc32_good:",
1262 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
1263 delta_cck->crc32_good,
1264 max_cck->crc32_good);
1265 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1266 "false_alarm_cnt:",
1209 le32_to_cpu(cck->false_alarm_cnt), 1267 le32_to_cpu(cck->false_alarm_cnt),
1210 accum_cck->false_alarm_cnt); 1268 accum_cck->false_alarm_cnt,
1211 pos += scnprintf(buf + pos, bufsz - pos, 1269 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
1212 "fina_sync_err_cnt:\t%u\t\t\t%u\n", 1270 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1271 "fina_sync_err_cnt:",
1213 le32_to_cpu(cck->fina_sync_err_cnt), 1272 le32_to_cpu(cck->fina_sync_err_cnt),
1214 accum_cck->fina_sync_err_cnt); 1273 accum_cck->fina_sync_err_cnt,
1215 pos += scnprintf(buf + pos, bufsz - pos, 1274 delta_cck->fina_sync_err_cnt,
1216 "sfd_timeout:\t\t%u\t\t\t%u\n", 1275 max_cck->fina_sync_err_cnt);
1276 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1277 "sfd_timeout:",
1217 le32_to_cpu(cck->sfd_timeout), 1278 le32_to_cpu(cck->sfd_timeout),
1218 accum_cck->sfd_timeout); 1279 accum_cck->sfd_timeout,
1219 pos += scnprintf(buf + pos, bufsz - pos, 1280 delta_cck->sfd_timeout, max_cck->sfd_timeout);
1220 "fina_timeout:\t\t%u\t\t\t%u\n", 1281 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1282 "fina_timeout:",
1221 le32_to_cpu(cck->fina_timeout), 1283 le32_to_cpu(cck->fina_timeout),
1222 accum_cck->fina_timeout); 1284 accum_cck->fina_timeout,
1223 pos += scnprintf(buf + pos, bufsz - pos, 1285 delta_cck->fina_timeout, max_cck->fina_timeout);
1224 "unresponded_rts:\t%u\t\t\t%u\n", 1286 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1287 "unresponded_rts:",
1225 le32_to_cpu(cck->unresponded_rts), 1288 le32_to_cpu(cck->unresponded_rts),
1226 accum_cck->unresponded_rts); 1289 accum_cck->unresponded_rts,
1227 pos += scnprintf(buf + pos, bufsz - pos, 1290 delta_cck->unresponded_rts,
1228 "rxe_frame_lmt_ovrun:\t%u\t\t\t%u\n", 1291 max_cck->unresponded_rts);
1292 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1293 "rxe_frame_lmt_ovrun:",
1229 le32_to_cpu(cck->rxe_frame_limit_overrun), 1294 le32_to_cpu(cck->rxe_frame_limit_overrun),
1230 accum_cck->rxe_frame_limit_overrun); 1295 accum_cck->rxe_frame_limit_overrun,
1231 pos += scnprintf(buf + pos, bufsz - pos, 1296 delta_cck->rxe_frame_limit_overrun,
1232 "sent_ack_cnt:\t\t%u\t\t\t%u\n", 1297 max_cck->rxe_frame_limit_overrun);
1298 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1299 "sent_ack_cnt:",
1233 le32_to_cpu(cck->sent_ack_cnt), 1300 le32_to_cpu(cck->sent_ack_cnt),
1234 accum_cck->sent_ack_cnt); 1301 accum_cck->sent_ack_cnt,
1235 pos += scnprintf(buf + pos, bufsz - pos, 1302 delta_cck->sent_ack_cnt,
1236 "sent_cts_cnt:\t\t%u\t\t\t%u\n", 1303 max_cck->sent_ack_cnt);
1304 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1305 "sent_cts_cnt:",
1237 le32_to_cpu(cck->sent_cts_cnt), 1306 le32_to_cpu(cck->sent_cts_cnt),
1238 accum_cck->sent_cts_cnt); 1307 accum_cck->sent_cts_cnt,
1239 pos += scnprintf(buf + pos, bufsz - pos, 1308 delta_cck->sent_cts_cnt,
1240 "sent_ba_rsp_cnt:\t%u\t\t\t%u\n", 1309 max_cck->sent_cts_cnt);
1310 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1311 "sent_ba_rsp_cnt:",
1241 le32_to_cpu(cck->sent_ba_rsp_cnt), 1312 le32_to_cpu(cck->sent_ba_rsp_cnt),
1242 accum_cck->sent_ba_rsp_cnt); 1313 accum_cck->sent_ba_rsp_cnt,
1243 pos += scnprintf(buf + pos, bufsz - pos, 1314 delta_cck->sent_ba_rsp_cnt,
1244 "dsp_self_kill:\t\t%u\t\t\t%u\n", 1315 max_cck->sent_ba_rsp_cnt);
1316 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1317 "dsp_self_kill:",
1245 le32_to_cpu(cck->dsp_self_kill), 1318 le32_to_cpu(cck->dsp_self_kill),
1246 accum_cck->dsp_self_kill); 1319 accum_cck->dsp_self_kill,
1247 pos += scnprintf(buf + pos, bufsz - pos, 1320 delta_cck->dsp_self_kill,
1248 "mh_format_err:\t\t%u\t\t\t%u\n", 1321 max_cck->dsp_self_kill);
1322 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1323 "mh_format_err:",
1249 le32_to_cpu(cck->mh_format_err), 1324 le32_to_cpu(cck->mh_format_err),
1250 accum_cck->mh_format_err); 1325 accum_cck->mh_format_err,
1251 pos += scnprintf(buf + pos, bufsz - pos, 1326 delta_cck->mh_format_err, max_cck->mh_format_err);
1252 "re_acq_main_rssi_sum:\t%u\t\t\t%u\n", 1327 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1328 "re_acq_main_rssi_sum:",
1253 le32_to_cpu(cck->re_acq_main_rssi_sum), 1329 le32_to_cpu(cck->re_acq_main_rssi_sum),
1254 accum_cck->re_acq_main_rssi_sum); 1330 accum_cck->re_acq_main_rssi_sum,
1255 1331 delta_cck->re_acq_main_rssi_sum,
1256 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - GENERAL:\n"); 1332 max_cck->re_acq_main_rssi_sum);
1257 pos += scnprintf(buf + pos, bufsz - pos, 1333
1258 "\t\t\tcurrent\t\t\taccumulative\n"); 1334 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
1259 pos += scnprintf(buf + pos, bufsz - pos, "bogus_cts:\t\t%u\t\t\t%u\n", 1335 "Statistics_Rx - GENERAL:");
1336 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1337 "bogus_cts:",
1260 le32_to_cpu(general->bogus_cts), 1338 le32_to_cpu(general->bogus_cts),
1261 accum_general->bogus_cts); 1339 accum_general->bogus_cts,
1262 pos += scnprintf(buf + pos, bufsz - pos, "bogus_ack:\t\t%u\t\t\t%u\n", 1340 delta_general->bogus_cts, max_general->bogus_cts);
1341 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1342 "bogus_ack:",
1263 le32_to_cpu(general->bogus_ack), 1343 le32_to_cpu(general->bogus_ack),
1264 accum_general->bogus_ack); 1344 accum_general->bogus_ack,
1265 pos += scnprintf(buf + pos, bufsz - pos, 1345 delta_general->bogus_ack, max_general->bogus_ack);
1266 "non_bssid_frames:\t%u\t\t\t%u\n", 1346 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1347 "non_bssid_frames:",
1267 le32_to_cpu(general->non_bssid_frames), 1348 le32_to_cpu(general->non_bssid_frames),
1268 accum_general->non_bssid_frames); 1349 accum_general->non_bssid_frames,
1269 pos += scnprintf(buf + pos, bufsz - pos, 1350 delta_general->non_bssid_frames,
1270 "filtered_frames:\t%u\t\t\t%u\n", 1351 max_general->non_bssid_frames);
1352 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1353 "filtered_frames:",
1271 le32_to_cpu(general->filtered_frames), 1354 le32_to_cpu(general->filtered_frames),
1272 accum_general->filtered_frames); 1355 accum_general->filtered_frames,
1273 pos += scnprintf(buf + pos, bufsz - pos, 1356 delta_general->filtered_frames,
1274 "non_channel_beacons:\t%u\t\t\t%u\n", 1357 max_general->filtered_frames);
1358 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1359 "non_channel_beacons:",
1275 le32_to_cpu(general->non_channel_beacons), 1360 le32_to_cpu(general->non_channel_beacons),
1276 accum_general->non_channel_beacons); 1361 accum_general->non_channel_beacons,
1277 pos += scnprintf(buf + pos, bufsz - pos, 1362 delta_general->non_channel_beacons,
1278 "channel_beacons:\t%u\t\t\t%u\n", 1363 max_general->non_channel_beacons);
1364 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1365 "channel_beacons:",
1279 le32_to_cpu(general->channel_beacons), 1366 le32_to_cpu(general->channel_beacons),
1280 accum_general->channel_beacons); 1367 accum_general->channel_beacons,
1281 pos += scnprintf(buf + pos, bufsz - pos, 1368 delta_general->channel_beacons,
1282 "num_missed_bcon:\t%u\t\t\t%u\n", 1369 max_general->channel_beacons);
1370 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1371 "num_missed_bcon:",
1283 le32_to_cpu(general->num_missed_bcon), 1372 le32_to_cpu(general->num_missed_bcon),
1284 accum_general->num_missed_bcon); 1373 accum_general->num_missed_bcon,
1285 pos += scnprintf(buf + pos, bufsz - pos, 1374 delta_general->num_missed_bcon,
1286 "adc_rx_saturation_time:\t%u\t\t\t%u\n", 1375 max_general->num_missed_bcon);
1376 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1377 "adc_rx_saturation_time:",
1287 le32_to_cpu(general->adc_rx_saturation_time), 1378 le32_to_cpu(general->adc_rx_saturation_time),
1288 accum_general->adc_rx_saturation_time); 1379 accum_general->adc_rx_saturation_time,
1289 pos += scnprintf(buf + pos, bufsz - pos, 1380 delta_general->adc_rx_saturation_time,
1290 "ina_detect_search_tm:\t%u\t\t\t%u\n", 1381 max_general->adc_rx_saturation_time);
1382 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1383 "ina_detect_search_tm:",
1291 le32_to_cpu(general->ina_detection_search_time), 1384 le32_to_cpu(general->ina_detection_search_time),
1292 accum_general->ina_detection_search_time); 1385 accum_general->ina_detection_search_time,
1293 pos += scnprintf(buf + pos, bufsz - pos, 1386 delta_general->ina_detection_search_time,
1294 "beacon_silence_rssi_a:\t%u\t\t\t%u\n", 1387 max_general->ina_detection_search_time);
1388 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1389 "beacon_silence_rssi_a:",
1295 le32_to_cpu(general->beacon_silence_rssi_a), 1390 le32_to_cpu(general->beacon_silence_rssi_a),
1296 accum_general->beacon_silence_rssi_a); 1391 accum_general->beacon_silence_rssi_a,
1297 pos += scnprintf(buf + pos, bufsz - pos, 1392 delta_general->beacon_silence_rssi_a,
1298 "beacon_silence_rssi_b:\t%u\t\t\t%u\n", 1393 max_general->beacon_silence_rssi_a);
1394 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1395 "beacon_silence_rssi_b:",
1299 le32_to_cpu(general->beacon_silence_rssi_b), 1396 le32_to_cpu(general->beacon_silence_rssi_b),
1300 accum_general->beacon_silence_rssi_b); 1397 accum_general->beacon_silence_rssi_b,
1301 pos += scnprintf(buf + pos, bufsz - pos, 1398 delta_general->beacon_silence_rssi_b,
1302 "beacon_silence_rssi_c:\t%u\t\t\t%u\n", 1399 max_general->beacon_silence_rssi_b);
1400 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1401 "beacon_silence_rssi_c:",
1303 le32_to_cpu(general->beacon_silence_rssi_c), 1402 le32_to_cpu(general->beacon_silence_rssi_c),
1304 accum_general->beacon_silence_rssi_c); 1403 accum_general->beacon_silence_rssi_c,
1305 pos += scnprintf(buf + pos, bufsz - pos, 1404 delta_general->beacon_silence_rssi_c,
1306 "interference_data_flag:\t%u\t\t\t%u\n", 1405 max_general->beacon_silence_rssi_c);
1406 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1407 "interference_data_flag:",
1307 le32_to_cpu(general->interference_data_flag), 1408 le32_to_cpu(general->interference_data_flag),
1308 accum_general->interference_data_flag); 1409 accum_general->interference_data_flag,
1309 pos += scnprintf(buf + pos, bufsz - pos, 1410 delta_general->interference_data_flag,
1310 "channel_load:\t\t%u\t\t\t%u\n", 1411 max_general->interference_data_flag);
1412 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1413 "channel_load:",
1311 le32_to_cpu(general->channel_load), 1414 le32_to_cpu(general->channel_load),
1312 accum_general->channel_load); 1415 accum_general->channel_load,
1313 pos += scnprintf(buf + pos, bufsz - pos, 1416 delta_general->channel_load,
1314 "dsp_false_alarms:\t%u\t\t\t%u\n", 1417 max_general->channel_load);
1418 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1419 "dsp_false_alarms:",
1315 le32_to_cpu(general->dsp_false_alarms), 1420 le32_to_cpu(general->dsp_false_alarms),
1316 accum_general->dsp_false_alarms); 1421 accum_general->dsp_false_alarms,
1317 pos += scnprintf(buf + pos, bufsz - pos, 1422 delta_general->dsp_false_alarms,
1318 "beacon_rssi_a:\t\t%u\t\t\t%u\n", 1423 max_general->dsp_false_alarms);
1424 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1425 "beacon_rssi_a:",
1319 le32_to_cpu(general->beacon_rssi_a), 1426 le32_to_cpu(general->beacon_rssi_a),
1320 accum_general->beacon_rssi_a); 1427 accum_general->beacon_rssi_a,
1321 pos += scnprintf(buf + pos, bufsz - pos, 1428 delta_general->beacon_rssi_a,
1322 "beacon_rssi_b:\t\t%u\t\t\t%u\n", 1429 max_general->beacon_rssi_a);
1430 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1431 "beacon_rssi_b:",
1323 le32_to_cpu(general->beacon_rssi_b), 1432 le32_to_cpu(general->beacon_rssi_b),
1324 accum_general->beacon_rssi_b); 1433 accum_general->beacon_rssi_b,
1325 pos += scnprintf(buf + pos, bufsz - pos, 1434 delta_general->beacon_rssi_b,
1326 "beacon_rssi_c:\t\t%u\t\t\t%u\n", 1435 max_general->beacon_rssi_b);
1436 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1437 "beacon_rssi_c:",
1327 le32_to_cpu(general->beacon_rssi_c), 1438 le32_to_cpu(general->beacon_rssi_c),
1328 accum_general->beacon_rssi_c); 1439 accum_general->beacon_rssi_c,
1329 pos += scnprintf(buf + pos, bufsz - pos, 1440 delta_general->beacon_rssi_c,
1330 "beacon_energy_a:\t%u\t\t\t%u\n", 1441 max_general->beacon_rssi_c);
1442 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1443 "beacon_energy_a:",
1331 le32_to_cpu(general->beacon_energy_a), 1444 le32_to_cpu(general->beacon_energy_a),
1332 accum_general->beacon_energy_a); 1445 accum_general->beacon_energy_a,
1333 pos += scnprintf(buf + pos, bufsz - pos, 1446 delta_general->beacon_energy_a,
1334 "beacon_energy_b:\t%u\t\t\t%u\n", 1447 max_general->beacon_energy_a);
1448 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1449 "beacon_energy_b:",
1335 le32_to_cpu(general->beacon_energy_b), 1450 le32_to_cpu(general->beacon_energy_b),
1336 accum_general->beacon_energy_b); 1451 accum_general->beacon_energy_b,
1337 pos += scnprintf(buf + pos, bufsz - pos, 1452 delta_general->beacon_energy_b,
1338 "beacon_energy_c:\t%u\t\t\t%u\n", 1453 max_general->beacon_energy_b);
1454 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1455 "beacon_energy_c:",
1339 le32_to_cpu(general->beacon_energy_c), 1456 le32_to_cpu(general->beacon_energy_c),
1340 accum_general->beacon_energy_c); 1457 accum_general->beacon_energy_c,
1458 delta_general->beacon_energy_c,
1459 max_general->beacon_energy_c);
1341 1460
1342 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n"); 1461 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
1343 pos += scnprintf(buf + pos, bufsz - pos, 1462 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
1344 "\t\t\tcurrent\t\t\taccumulative\n"); 1463 "Statistics_Rx - OFDM_HT:");
1345 pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n", 1464 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1346 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err); 1465 "plcp_err:",
1347 pos += scnprintf(buf + pos, bufsz - pos, 1466 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
1348 "overrun_err:\t\t%u\t\t\t%u\n", 1467 delta_ht->plcp_err, max_ht->plcp_err);
1349 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err); 1468 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1350 pos += scnprintf(buf + pos, bufsz - pos, 1469 "overrun_err:",
1351 "early_overrun_err:\t%u\t\t\t%u\n", 1470 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
1471 delta_ht->overrun_err, max_ht->overrun_err);
1472 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1473 "early_overrun_err:",
1352 le32_to_cpu(ht->early_overrun_err), 1474 le32_to_cpu(ht->early_overrun_err),
1353 accum_ht->early_overrun_err); 1475 accum_ht->early_overrun_err,
1354 pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n", 1476 delta_ht->early_overrun_err,
1355 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good); 1477 max_ht->early_overrun_err);
1356 pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n", 1478 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1357 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err); 1479 "crc32_good:",
1358 pos += scnprintf(buf + pos, bufsz - pos, 1480 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
1359 "mh_format_err:\t\t%u\t\t\t%u\n", 1481 delta_ht->crc32_good, max_ht->crc32_good);
1482 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1483 "crc32_err:",
1484 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
1485 delta_ht->crc32_err, max_ht->crc32_err);
1486 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1487 "mh_format_err:",
1360 le32_to_cpu(ht->mh_format_err), 1488 le32_to_cpu(ht->mh_format_err),
1361 accum_ht->mh_format_err); 1489 accum_ht->mh_format_err,
1362 pos += scnprintf(buf + pos, bufsz - pos, 1490 delta_ht->mh_format_err, max_ht->mh_format_err);
1363 "agg_crc32_good:\t\t%u\t\t\t%u\n", 1491 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1492 "agg_crc32_good:",
1364 le32_to_cpu(ht->agg_crc32_good), 1493 le32_to_cpu(ht->agg_crc32_good),
1365 accum_ht->agg_crc32_good); 1494 accum_ht->agg_crc32_good,
1366 pos += scnprintf(buf + pos, bufsz - pos, 1495 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
1367 "agg_mpdu_cnt:\t\t%u\t\t\t%u\n", 1496 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1497 "agg_mpdu_cnt:",
1368 le32_to_cpu(ht->agg_mpdu_cnt), 1498 le32_to_cpu(ht->agg_mpdu_cnt),
1369 accum_ht->agg_mpdu_cnt); 1499 accum_ht->agg_mpdu_cnt,
1370 pos += scnprintf(buf + pos, bufsz - pos, "agg_cnt:\t\t%u\t\t\t%u\n", 1500 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
1371 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt); 1501 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1502 "agg_cnt:",
1503 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
1504 delta_ht->agg_cnt, max_ht->agg_cnt);
1505 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1506 "unsupport_mcs:",
1507 le32_to_cpu(ht->unsupport_mcs),
1508 accum_ht->unsupport_mcs,
1509 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
1372 1510
1373 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1511 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1374 kfree(buf); 1512 kfree(buf);
@@ -1379,26 +1517,16 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1379 char __user *user_buf, 1517 char __user *user_buf,
1380 size_t count, loff_t *ppos) 1518 size_t count, loff_t *ppos)
1381{ 1519{
1382 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1520 struct iwl_priv *priv = file->private_data;
1383 int pos = 0; 1521 int pos = 0;
1384 char *buf; 1522 char *buf;
1385 int bufsz = (sizeof(struct statistics_tx) * 24) + 250; 1523 int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
1386 ssize_t ret; 1524 ssize_t ret;
1387 struct statistics_tx *tx, *accum_tx; 1525 struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
1388 1526
1389 if (!iwl_is_alive(priv)) 1527 if (!iwl_is_alive(priv))
1390 return -EAGAIN; 1528 return -EAGAIN;
1391 1529
1392 /* make request to uCode to retrieve statistics information */
1393 mutex_lock(&priv->mutex);
1394 ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
1395 mutex_unlock(&priv->mutex);
1396
1397 if (ret) {
1398 IWL_ERR(priv,
1399 "Error sending statistics request: %zd\n", ret);
1400 return -EAGAIN;
1401 }
1402 buf = kzalloc(bufsz, GFP_KERNEL); 1530 buf = kzalloc(bufsz, GFP_KERNEL);
1403 if (!buf) { 1531 if (!buf) {
1404 IWL_ERR(priv, "Can not allocate Buffer\n"); 1532 IWL_ERR(priv, "Can not allocate Buffer\n");
@@ -1411,106 +1539,148 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1411 */ 1539 */
1412 tx = &priv->statistics.tx; 1540 tx = &priv->statistics.tx;
1413 accum_tx = &priv->accum_statistics.tx; 1541 accum_tx = &priv->accum_statistics.tx;
1542 delta_tx = &priv->delta_statistics.tx;
1543 max_tx = &priv->max_delta.tx;
1414 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); 1544 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
1415 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Tx:\n"); 1545 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
1416 pos += scnprintf(buf + pos, bufsz - pos, 1546 "Statistics_Tx:");
1417 "\t\t\tcurrent\t\t\taccumulative\n"); 1547 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1418 pos += scnprintf(buf + pos, bufsz - pos, "preamble:\t\t\t%u\t\t\t%u\n", 1548 "preamble:",
1419 le32_to_cpu(tx->preamble_cnt), 1549 le32_to_cpu(tx->preamble_cnt),
1420 accum_tx->preamble_cnt); 1550 accum_tx->preamble_cnt,
1421 pos += scnprintf(buf + pos, bufsz - pos, 1551 delta_tx->preamble_cnt, max_tx->preamble_cnt);
1422 "rx_detected_cnt:\t\t%u\t\t\t%u\n", 1552 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1553 "rx_detected_cnt:",
1423 le32_to_cpu(tx->rx_detected_cnt), 1554 le32_to_cpu(tx->rx_detected_cnt),
1424 accum_tx->rx_detected_cnt); 1555 accum_tx->rx_detected_cnt,
1425 pos += scnprintf(buf + pos, bufsz - pos, 1556 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
1426 "bt_prio_defer_cnt:\t\t%u\t\t\t%u\n", 1557 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1558 "bt_prio_defer_cnt:",
1427 le32_to_cpu(tx->bt_prio_defer_cnt), 1559 le32_to_cpu(tx->bt_prio_defer_cnt),
1428 accum_tx->bt_prio_defer_cnt); 1560 accum_tx->bt_prio_defer_cnt,
1429 pos += scnprintf(buf + pos, bufsz - pos, 1561 delta_tx->bt_prio_defer_cnt,
1430 "bt_prio_kill_cnt:\t\t%u\t\t\t%u\n", 1562 max_tx->bt_prio_defer_cnt);
1563 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1564 "bt_prio_kill_cnt:",
1431 le32_to_cpu(tx->bt_prio_kill_cnt), 1565 le32_to_cpu(tx->bt_prio_kill_cnt),
1432 accum_tx->bt_prio_kill_cnt); 1566 accum_tx->bt_prio_kill_cnt,
1433 pos += scnprintf(buf + pos, bufsz - pos, 1567 delta_tx->bt_prio_kill_cnt,
1434 "few_bytes_cnt:\t\t\t%u\t\t\t%u\n", 1568 max_tx->bt_prio_kill_cnt);
1569 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1570 "few_bytes_cnt:",
1435 le32_to_cpu(tx->few_bytes_cnt), 1571 le32_to_cpu(tx->few_bytes_cnt),
1436 accum_tx->few_bytes_cnt); 1572 accum_tx->few_bytes_cnt,
1437 pos += scnprintf(buf + pos, bufsz - pos, 1573 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
1438 "cts_timeout:\t\t\t%u\t\t\t%u\n", 1574 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1439 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout); 1575 "cts_timeout:",
1440 pos += scnprintf(buf + pos, bufsz - pos, 1576 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
1441 "ack_timeout:\t\t\t%u\t\t\t%u\n", 1577 delta_tx->cts_timeout, max_tx->cts_timeout);
1578 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1579 "ack_timeout:",
1442 le32_to_cpu(tx->ack_timeout), 1580 le32_to_cpu(tx->ack_timeout),
1443 accum_tx->ack_timeout); 1581 accum_tx->ack_timeout,
1444 pos += scnprintf(buf + pos, bufsz - pos, 1582 delta_tx->ack_timeout, max_tx->ack_timeout);
1445 "expected_ack_cnt:\t\t%u\t\t\t%u\n", 1583 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1584 "expected_ack_cnt:",
1446 le32_to_cpu(tx->expected_ack_cnt), 1585 le32_to_cpu(tx->expected_ack_cnt),
1447 accum_tx->expected_ack_cnt); 1586 accum_tx->expected_ack_cnt,
1448 pos += scnprintf(buf + pos, bufsz - pos, 1587 delta_tx->expected_ack_cnt,
1449 "actual_ack_cnt:\t\t\t%u\t\t\t%u\n", 1588 max_tx->expected_ack_cnt);
1589 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1590 "actual_ack_cnt:",
1450 le32_to_cpu(tx->actual_ack_cnt), 1591 le32_to_cpu(tx->actual_ack_cnt),
1451 accum_tx->actual_ack_cnt); 1592 accum_tx->actual_ack_cnt,
1452 pos += scnprintf(buf + pos, bufsz - pos, 1593 delta_tx->actual_ack_cnt,
1453 "dump_msdu_cnt:\t\t\t%u\t\t\t%u\n", 1594 max_tx->actual_ack_cnt);
1595 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1596 "dump_msdu_cnt:",
1454 le32_to_cpu(tx->dump_msdu_cnt), 1597 le32_to_cpu(tx->dump_msdu_cnt),
1455 accum_tx->dump_msdu_cnt); 1598 accum_tx->dump_msdu_cnt,
1456 pos += scnprintf(buf + pos, bufsz - pos, 1599 delta_tx->dump_msdu_cnt,
1457 "abort_nxt_frame_mismatch:" 1600 max_tx->dump_msdu_cnt);
1458 "\t%u\t\t\t%u\n", 1601 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1602 "abort_nxt_frame_mismatch:",
1459 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt), 1603 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
1460 accum_tx->burst_abort_next_frame_mismatch_cnt); 1604 accum_tx->burst_abort_next_frame_mismatch_cnt,
1461 pos += scnprintf(buf + pos, bufsz - pos, 1605 delta_tx->burst_abort_next_frame_mismatch_cnt,
1462 "abort_missing_nxt_frame:" 1606 max_tx->burst_abort_next_frame_mismatch_cnt);
1463 "\t%u\t\t\t%u\n", 1607 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1608 "abort_missing_nxt_frame:",
1464 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt), 1609 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
1465 accum_tx->burst_abort_missing_next_frame_cnt); 1610 accum_tx->burst_abort_missing_next_frame_cnt,
1466 pos += scnprintf(buf + pos, bufsz - pos, 1611 delta_tx->burst_abort_missing_next_frame_cnt,
1467 "cts_timeout_collision:\t\t%u\t\t\t%u\n", 1612 max_tx->burst_abort_missing_next_frame_cnt);
1613 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1614 "cts_timeout_collision:",
1468 le32_to_cpu(tx->cts_timeout_collision), 1615 le32_to_cpu(tx->cts_timeout_collision),
1469 accum_tx->cts_timeout_collision); 1616 accum_tx->cts_timeout_collision,
1470 pos += scnprintf(buf + pos, bufsz - pos, 1617 delta_tx->cts_timeout_collision,
1471 "ack_ba_timeout_collision:\t%u\t\t\t%u\n", 1618 max_tx->cts_timeout_collision);
1619 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1620 "ack_ba_timeout_collision:",
1472 le32_to_cpu(tx->ack_or_ba_timeout_collision), 1621 le32_to_cpu(tx->ack_or_ba_timeout_collision),
1473 accum_tx->ack_or_ba_timeout_collision); 1622 accum_tx->ack_or_ba_timeout_collision,
1474 pos += scnprintf(buf + pos, bufsz - pos, 1623 delta_tx->ack_or_ba_timeout_collision,
1475 "agg ba_timeout:\t\t\t%u\t\t\t%u\n", 1624 max_tx->ack_or_ba_timeout_collision);
1625 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1626 "agg ba_timeout:",
1476 le32_to_cpu(tx->agg.ba_timeout), 1627 le32_to_cpu(tx->agg.ba_timeout),
1477 accum_tx->agg.ba_timeout); 1628 accum_tx->agg.ba_timeout,
1478 pos += scnprintf(buf + pos, bufsz - pos, 1629 delta_tx->agg.ba_timeout,
1479 "agg ba_resched_frames:\t\t%u\t\t\t%u\n", 1630 max_tx->agg.ba_timeout);
1631 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1632 "agg ba_resched_frames:",
1480 le32_to_cpu(tx->agg.ba_reschedule_frames), 1633 le32_to_cpu(tx->agg.ba_reschedule_frames),
1481 accum_tx->agg.ba_reschedule_frames); 1634 accum_tx->agg.ba_reschedule_frames,
1482 pos += scnprintf(buf + pos, bufsz - pos, 1635 delta_tx->agg.ba_reschedule_frames,
1483 "agg scd_query_agg_frame:\t%u\t\t\t%u\n", 1636 max_tx->agg.ba_reschedule_frames);
1637 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1638 "agg scd_query_agg_frame:",
1484 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt), 1639 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
1485 accum_tx->agg.scd_query_agg_frame_cnt); 1640 accum_tx->agg.scd_query_agg_frame_cnt,
1486 pos += scnprintf(buf + pos, bufsz - pos, 1641 delta_tx->agg.scd_query_agg_frame_cnt,
1487 "agg scd_query_no_agg:\t\t%u\t\t\t%u\n", 1642 max_tx->agg.scd_query_agg_frame_cnt);
1643 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1644 "agg scd_query_no_agg:",
1488 le32_to_cpu(tx->agg.scd_query_no_agg), 1645 le32_to_cpu(tx->agg.scd_query_no_agg),
1489 accum_tx->agg.scd_query_no_agg); 1646 accum_tx->agg.scd_query_no_agg,
1490 pos += scnprintf(buf + pos, bufsz - pos, 1647 delta_tx->agg.scd_query_no_agg,
1491 "agg scd_query_agg:\t\t%u\t\t\t%u\n", 1648 max_tx->agg.scd_query_no_agg);
1649 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1650 "agg scd_query_agg:",
1492 le32_to_cpu(tx->agg.scd_query_agg), 1651 le32_to_cpu(tx->agg.scd_query_agg),
1493 accum_tx->agg.scd_query_agg); 1652 accum_tx->agg.scd_query_agg,
1494 pos += scnprintf(buf + pos, bufsz - pos, 1653 delta_tx->agg.scd_query_agg,
1495 "agg scd_query_mismatch:\t\t%u\t\t\t%u\n", 1654 max_tx->agg.scd_query_agg);
1655 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1656 "agg scd_query_mismatch:",
1496 le32_to_cpu(tx->agg.scd_query_mismatch), 1657 le32_to_cpu(tx->agg.scd_query_mismatch),
1497 accum_tx->agg.scd_query_mismatch); 1658 accum_tx->agg.scd_query_mismatch,
1498 pos += scnprintf(buf + pos, bufsz - pos, 1659 delta_tx->agg.scd_query_mismatch,
1499 "agg frame_not_ready:\t\t%u\t\t\t%u\n", 1660 max_tx->agg.scd_query_mismatch);
1661 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1662 "agg frame_not_ready:",
1500 le32_to_cpu(tx->agg.frame_not_ready), 1663 le32_to_cpu(tx->agg.frame_not_ready),
1501 accum_tx->agg.frame_not_ready); 1664 accum_tx->agg.frame_not_ready,
1502 pos += scnprintf(buf + pos, bufsz - pos, 1665 delta_tx->agg.frame_not_ready,
1503 "agg underrun:\t\t\t%u\t\t\t%u\n", 1666 max_tx->agg.frame_not_ready);
1667 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1668 "agg underrun:",
1504 le32_to_cpu(tx->agg.underrun), 1669 le32_to_cpu(tx->agg.underrun),
1505 accum_tx->agg.underrun); 1670 accum_tx->agg.underrun,
1506 pos += scnprintf(buf + pos, bufsz - pos, 1671 delta_tx->agg.underrun, max_tx->agg.underrun);
1507 "agg bt_prio_kill:\t\t%u\t\t\t%u\n", 1672 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1673 "agg bt_prio_kill:",
1508 le32_to_cpu(tx->agg.bt_prio_kill), 1674 le32_to_cpu(tx->agg.bt_prio_kill),
1509 accum_tx->agg.bt_prio_kill); 1675 accum_tx->agg.bt_prio_kill,
1510 pos += scnprintf(buf + pos, bufsz - pos, 1676 delta_tx->agg.bt_prio_kill,
1511 "agg rx_ba_rsp_cnt:\t\t%u\t\t\t%u\n", 1677 max_tx->agg.bt_prio_kill);
1678 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1679 "agg rx_ba_rsp_cnt:",
1512 le32_to_cpu(tx->agg.rx_ba_rsp_cnt), 1680 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
1513 accum_tx->agg.rx_ba_rsp_cnt); 1681 accum_tx->agg.rx_ba_rsp_cnt,
1682 delta_tx->agg.rx_ba_rsp_cnt,
1683 max_tx->agg.rx_ba_rsp_cnt);
1514 1684
1515 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1685 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1516 kfree(buf); 1686 kfree(buf);
@@ -1521,28 +1691,19 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
1521 char __user *user_buf, 1691 char __user *user_buf,
1522 size_t count, loff_t *ppos) 1692 size_t count, loff_t *ppos)
1523{ 1693{
1524 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1694 struct iwl_priv *priv = file->private_data;
1525 int pos = 0; 1695 int pos = 0;
1526 char *buf; 1696 char *buf;
1527 int bufsz = sizeof(struct statistics_general) * 4 + 250; 1697 int bufsz = sizeof(struct statistics_general) * 10 + 300;
1528 ssize_t ret; 1698 ssize_t ret;
1529 struct statistics_general *general, *accum_general; 1699 struct statistics_general *general, *accum_general;
1530 struct statistics_dbg *dbg, *accum_dbg; 1700 struct statistics_general *delta_general, *max_general;
1531 struct statistics_div *div, *accum_div; 1701 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
1702 struct statistics_div *div, *accum_div, *delta_div, *max_div;
1532 1703
1533 if (!iwl_is_alive(priv)) 1704 if (!iwl_is_alive(priv))
1534 return -EAGAIN; 1705 return -EAGAIN;
1535 1706
1536 /* make request to uCode to retrieve statistics information */
1537 mutex_lock(&priv->mutex);
1538 ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
1539 mutex_unlock(&priv->mutex);
1540
1541 if (ret) {
1542 IWL_ERR(priv,
1543 "Error sending statistics request: %zd\n", ret);
1544 return -EAGAIN;
1545 }
1546 buf = kzalloc(bufsz, GFP_KERNEL); 1707 buf = kzalloc(bufsz, GFP_KERNEL);
1547 if (!buf) { 1708 if (!buf) {
1548 IWL_ERR(priv, "Can not allocate Buffer\n"); 1709 IWL_ERR(priv, "Can not allocate Buffer\n");
@@ -1557,52 +1718,78 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
1557 dbg = &priv->statistics.general.dbg; 1718 dbg = &priv->statistics.general.dbg;
1558 div = &priv->statistics.general.div; 1719 div = &priv->statistics.general.div;
1559 accum_general = &priv->accum_statistics.general; 1720 accum_general = &priv->accum_statistics.general;
1721 delta_general = &priv->delta_statistics.general;
1722 max_general = &priv->max_delta.general;
1560 accum_dbg = &priv->accum_statistics.general.dbg; 1723 accum_dbg = &priv->accum_statistics.general.dbg;
1724 delta_dbg = &priv->delta_statistics.general.dbg;
1725 max_dbg = &priv->max_delta.general.dbg;
1561 accum_div = &priv->accum_statistics.general.div; 1726 accum_div = &priv->accum_statistics.general.div;
1727 delta_div = &priv->delta_statistics.general.div;
1728 max_div = &priv->max_delta.general.div;
1562 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); 1729 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
1563 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_General:\n"); 1730 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
1564 pos += scnprintf(buf + pos, bufsz - pos, 1731 "Statistics_General:");
1565 "\t\t\tcurrent\t\t\taccumulative\n"); 1732 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format,
1566 pos += scnprintf(buf + pos, bufsz - pos, "temperature:\t\t\t%u\n", 1733 "temperature:",
1567 le32_to_cpu(general->temperature)); 1734 le32_to_cpu(general->temperature));
1568 pos += scnprintf(buf + pos, bufsz - pos, "temperature_m:\t\t\t%u\n", 1735 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format,
1736 "temperature_m:",
1569 le32_to_cpu(general->temperature_m)); 1737 le32_to_cpu(general->temperature_m));
1570 pos += scnprintf(buf + pos, bufsz - pos, 1738 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1571 "burst_check:\t\t\t%u\t\t\t%u\n", 1739 "burst_check:",
1572 le32_to_cpu(dbg->burst_check), 1740 le32_to_cpu(dbg->burst_check),
1573 accum_dbg->burst_check); 1741 accum_dbg->burst_check,
1574 pos += scnprintf(buf + pos, bufsz - pos, 1742 delta_dbg->burst_check, max_dbg->burst_check);
1575 "burst_count:\t\t\t%u\t\t\t%u\n", 1743 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1744 "burst_count:",
1576 le32_to_cpu(dbg->burst_count), 1745 le32_to_cpu(dbg->burst_count),
1577 accum_dbg->burst_count); 1746 accum_dbg->burst_count,
1578 pos += scnprintf(buf + pos, bufsz - pos, 1747 delta_dbg->burst_count, max_dbg->burst_count);
1579 "sleep_time:\t\t\t%u\t\t\t%u\n", 1748 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1749 "sleep_time:",
1580 le32_to_cpu(general->sleep_time), 1750 le32_to_cpu(general->sleep_time),
1581 accum_general->sleep_time); 1751 accum_general->sleep_time,
1582 pos += scnprintf(buf + pos, bufsz - pos, 1752 delta_general->sleep_time, max_general->sleep_time);
1583 "slots_out:\t\t\t%u\t\t\t%u\n", 1753 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1754 "slots_out:",
1584 le32_to_cpu(general->slots_out), 1755 le32_to_cpu(general->slots_out),
1585 accum_general->slots_out); 1756 accum_general->slots_out,
1586 pos += scnprintf(buf + pos, bufsz - pos, 1757 delta_general->slots_out, max_general->slots_out);
1587 "slots_idle:\t\t\t%u\t\t\t%u\n", 1758 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1759 "slots_idle:",
1588 le32_to_cpu(general->slots_idle), 1760 le32_to_cpu(general->slots_idle),
1589 accum_general->slots_idle); 1761 accum_general->slots_idle,
1762 delta_general->slots_idle, max_general->slots_idle);
1590 pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n", 1763 pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
1591 le32_to_cpu(general->ttl_timestamp)); 1764 le32_to_cpu(general->ttl_timestamp));
1592 pos += scnprintf(buf + pos, bufsz - pos, "tx_on_a:\t\t\t%u\t\t\t%u\n", 1765 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1593 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a); 1766 "tx_on_a:",
1594 pos += scnprintf(buf + pos, bufsz - pos, "tx_on_b:\t\t\t%u\t\t\t%u\n", 1767 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
1595 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b); 1768 delta_div->tx_on_a, max_div->tx_on_a);
1596 pos += scnprintf(buf + pos, bufsz - pos, 1769 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1597 "exec_time:\t\t\t%u\t\t\t%u\n", 1770 "tx_on_b:",
1598 le32_to_cpu(div->exec_time), accum_div->exec_time); 1771 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
1599 pos += scnprintf(buf + pos, bufsz - pos, 1772 delta_div->tx_on_b, max_div->tx_on_b);
1600 "probe_time:\t\t\t%u\t\t\t%u\n", 1773 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1601 le32_to_cpu(div->probe_time), accum_div->probe_time); 1774 "exec_time:",
1602 pos += scnprintf(buf + pos, bufsz - pos, 1775 le32_to_cpu(div->exec_time), accum_div->exec_time,
1603 "rx_enable_counter:\t\t%u\t\t\t%u\n", 1776 delta_div->exec_time, max_div->exec_time);
1777 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1778 "probe_time:",
1779 le32_to_cpu(div->probe_time), accum_div->probe_time,
1780 delta_div->probe_time, max_div->probe_time);
1781 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1782 "rx_enable_counter:",
1604 le32_to_cpu(general->rx_enable_counter), 1783 le32_to_cpu(general->rx_enable_counter),
1605 accum_general->rx_enable_counter); 1784 accum_general->rx_enable_counter,
1785 delta_general->rx_enable_counter,
1786 max_general->rx_enable_counter);
1787 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1788 "num_of_sos_states:",
1789 le32_to_cpu(general->num_of_sos_states),
1790 accum_general->num_of_sos_states,
1791 delta_general->num_of_sos_states,
1792 max_general->num_of_sos_states);
1606 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1793 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1607 kfree(buf); 1794 kfree(buf);
1608 return ret; 1795 return ret;
@@ -1612,7 +1799,7 @@ static ssize_t iwl_dbgfs_sensitivity_read(struct file *file,
1612 char __user *user_buf, 1799 char __user *user_buf,
1613 size_t count, loff_t *ppos) { 1800 size_t count, loff_t *ppos) {
1614 1801
1615 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1802 struct iwl_priv *priv = file->private_data;
1616 int pos = 0; 1803 int pos = 0;
1617 int cnt = 0; 1804 int cnt = 0;
1618 char *buf; 1805 char *buf;
@@ -1693,7 +1880,7 @@ static ssize_t iwl_dbgfs_chain_noise_read(struct file *file,
1693 char __user *user_buf, 1880 char __user *user_buf,
1694 size_t count, loff_t *ppos) { 1881 size_t count, loff_t *ppos) {
1695 1882
1696 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1883 struct iwl_priv *priv = file->private_data;
1697 int pos = 0; 1884 int pos = 0;
1698 int cnt = 0; 1885 int cnt = 0;
1699 char *buf; 1886 char *buf;
@@ -1751,26 +1938,15 @@ static ssize_t iwl_dbgfs_tx_power_read(struct file *file,
1751 char __user *user_buf, 1938 char __user *user_buf,
1752 size_t count, loff_t *ppos) { 1939 size_t count, loff_t *ppos) {
1753 1940
1754 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1941 struct iwl_priv *priv = file->private_data;
1755 char buf[128]; 1942 char buf[128];
1756 int pos = 0; 1943 int pos = 0;
1757 ssize_t ret;
1758 const size_t bufsz = sizeof(buf); 1944 const size_t bufsz = sizeof(buf);
1759 struct statistics_tx *tx; 1945 struct statistics_tx *tx;
1760 1946
1761 if (!iwl_is_alive(priv)) 1947 if (!iwl_is_alive(priv))
1762 pos += scnprintf(buf + pos, bufsz - pos, "N/A\n"); 1948 pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
1763 else { 1949 else {
1764 /* make request to uCode to retrieve statistics information */
1765 mutex_lock(&priv->mutex);
1766 ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
1767 mutex_unlock(&priv->mutex);
1768
1769 if (ret) {
1770 IWL_ERR(priv, "Error sending statistics request: %zd\n",
1771 ret);
1772 return -EAGAIN;
1773 }
1774 tx = &priv->statistics.tx; 1950 tx = &priv->statistics.tx;
1775 if (tx->tx_power.ant_a || 1951 if (tx->tx_power.ant_a ||
1776 tx->tx_power.ant_b || 1952 tx->tx_power.ant_b ||
@@ -1802,7 +1978,7 @@ static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
1802 char __user *user_buf, 1978 char __user *user_buf,
1803 size_t count, loff_t *ppos) 1979 size_t count, loff_t *ppos)
1804{ 1980{
1805 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1981 struct iwl_priv *priv = file->private_data;
1806 char buf[60]; 1982 char buf[60];
1807 int pos = 0; 1983 int pos = 0;
1808 const size_t bufsz = sizeof(buf); 1984 const size_t bufsz = sizeof(buf);
@@ -1845,6 +2021,262 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
1845 return count; 2021 return count;
1846} 2022}
1847 2023
2024static ssize_t iwl_dbgfs_csr_write(struct file *file,
2025 const char __user *user_buf,
2026 size_t count, loff_t *ppos)
2027{
2028 struct iwl_priv *priv = file->private_data;
2029 char buf[8];
2030 int buf_size;
2031 int csr;
2032
2033 memset(buf, 0, sizeof(buf));
2034 buf_size = min(count, sizeof(buf) - 1);
2035 if (copy_from_user(buf, user_buf, buf_size))
2036 return -EFAULT;
2037 if (sscanf(buf, "%d", &csr) != 1)
2038 return -EFAULT;
2039
2040 if (priv->cfg->ops->lib->dump_csr)
2041 priv->cfg->ops->lib->dump_csr(priv);
2042
2043 return count;
2044}
2045
2046static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
2047 char __user *user_buf,
2048 size_t count, loff_t *ppos) {
2049
2050 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
2051 int pos = 0;
2052 char buf[128];
2053 const size_t bufsz = sizeof(buf);
2054 ssize_t ret;
2055
2056 pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
2057 priv->event_log.ucode_trace ? "On" : "Off");
2058 pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n",
2059 priv->event_log.non_wraps_count);
2060 pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n",
2061 priv->event_log.wraps_once_count);
2062 pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
2063 priv->event_log.wraps_more_count);
2064
2065 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2066 return ret;
2067}
2068
2069static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
2070 const char __user *user_buf,
2071 size_t count, loff_t *ppos)
2072{
2073 struct iwl_priv *priv = file->private_data;
2074 char buf[8];
2075 int buf_size;
2076 int trace;
2077
2078 memset(buf, 0, sizeof(buf));
2079 buf_size = min(count, sizeof(buf) - 1);
2080 if (copy_from_user(buf, user_buf, buf_size))
2081 return -EFAULT;
2082 if (sscanf(buf, "%d", &trace) != 1)
2083 return -EFAULT;
2084
2085 if (trace) {
2086 priv->event_log.ucode_trace = true;
2087 /* schedule the ucode timer to occur in UCODE_TRACE_PERIOD */
2088 mod_timer(&priv->ucode_trace,
2089 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
2090 } else {
2091 priv->event_log.ucode_trace = false;
2092 del_timer_sync(&priv->ucode_trace);
2093 }
2094
2095 return count;
2096}
2097
2098static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
2099 char __user *user_buf,
2100 size_t count, loff_t *ppos)
2101{
2102 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
2103 char *buf;
2104 int pos = 0;
2105 ssize_t ret = -EFAULT;
2106
2107 if (priv->cfg->ops->lib->dump_fh) {
2108 ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
2109 if (buf) {
2110 ret = simple_read_from_buffer(user_buf,
2111 count, ppos, buf, pos);
2112 kfree(buf);
2113 }
2114 }
2115
2116 return ret;
2117}
2118
2119static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file,
2120 char __user *user_buf,
2121 size_t count, loff_t *ppos) {
2122
2123 struct iwl_priv *priv = file->private_data;
2124 int pos = 0;
2125 char buf[12];
2126 const size_t bufsz = sizeof(buf);
2127 ssize_t ret;
2128
2129 pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
2130 priv->missed_beacon_threshold);
2131
2132 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2133 return ret;
2134}
2135
2136static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
2137 const char __user *user_buf,
2138 size_t count, loff_t *ppos)
2139{
2140 struct iwl_priv *priv = file->private_data;
2141 char buf[8];
2142 int buf_size;
2143 int missed;
2144
2145 memset(buf, 0, sizeof(buf));
2146 buf_size = min(count, sizeof(buf) - 1);
2147 if (copy_from_user(buf, user_buf, buf_size))
2148 return -EFAULT;
2149 if (sscanf(buf, "%d", &missed) != 1)
2150 return -EINVAL;
2151
2152 if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
2153 missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
2154 priv->missed_beacon_threshold =
2155 IWL_MISSED_BEACON_THRESHOLD_DEF;
2156 else
2157 priv->missed_beacon_threshold = missed;
2158
2159 return count;
2160}
2161
2162static ssize_t iwl_dbgfs_internal_scan_write(struct file *file,
2163 const char __user *user_buf,
2164 size_t count, loff_t *ppos)
2165{
2166 struct iwl_priv *priv = file->private_data;
2167 char buf[8];
2168 int buf_size;
2169 int scan;
2170
2171 memset(buf, 0, sizeof(buf));
2172 buf_size = min(count, sizeof(buf) - 1);
2173 if (copy_from_user(buf, user_buf, buf_size))
2174 return -EFAULT;
2175 if (sscanf(buf, "%d", &scan) != 1)
2176 return -EINVAL;
2177
2178 iwl_internal_short_hw_scan(priv);
2179
2180 return count;
2181}
2182
2183static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
2184 char __user *user_buf,
2185 size_t count, loff_t *ppos) {
2186
2187 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
2188 int pos = 0;
2189 char buf[12];
2190 const size_t bufsz = sizeof(buf);
2191 ssize_t ret;
2192
2193 pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
2194 priv->cfg->plcp_delta_threshold);
2195
2196 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2197 return ret;
2198}
2199
2200static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
2201 const char __user *user_buf,
2202 size_t count, loff_t *ppos) {
2203
2204 struct iwl_priv *priv = file->private_data;
2205 char buf[8];
2206 int buf_size;
2207 int plcp;
2208
2209 memset(buf, 0, sizeof(buf));
2210 buf_size = min(count, sizeof(buf) - 1);
2211 if (copy_from_user(buf, user_buf, buf_size))
2212 return -EFAULT;
2213 if (sscanf(buf, "%d", &plcp) != 1)
2214 return -EINVAL;
2215 if ((plcp <= IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
2216 (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
2217 priv->cfg->plcp_delta_threshold =
2218 IWL_MAX_PLCP_ERR_THRESHOLD_DEF;
2219 else
2220 priv->cfg->plcp_delta_threshold = plcp;
2221 return count;
2222}
2223
2224static ssize_t iwl_dbgfs_force_reset_read(struct file *file,
2225 char __user *user_buf,
2226 size_t count, loff_t *ppos) {
2227
2228 struct iwl_priv *priv = file->private_data;
2229 int i, pos = 0;
2230 char buf[300];
2231 const size_t bufsz = sizeof(buf);
2232 struct iwl_force_reset *force_reset;
2233
2234 for (i = 0; i < IWL_MAX_FORCE_RESET; i++) {
2235 force_reset = &priv->force_reset[i];
2236 pos += scnprintf(buf + pos, bufsz - pos,
2237 "Force reset method %d\n", i);
2238 pos += scnprintf(buf + pos, bufsz - pos,
2239 "\tnumber of reset request: %d\n",
2240 force_reset->reset_request_count);
2241 pos += scnprintf(buf + pos, bufsz - pos,
2242 "\tnumber of reset request success: %d\n",
2243 force_reset->reset_success_count);
2244 pos += scnprintf(buf + pos, bufsz - pos,
2245 "\tnumber of reset request reject: %d\n",
2246 force_reset->reset_reject_count);
2247 pos += scnprintf(buf + pos, bufsz - pos,
2248 "\treset duration: %lu\n",
2249 force_reset->reset_duration);
2250 }
2251 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2252}
2253
2254static ssize_t iwl_dbgfs_force_reset_write(struct file *file,
2255 const char __user *user_buf,
2256 size_t count, loff_t *ppos) {
2257
2258 struct iwl_priv *priv = file->private_data;
2259 char buf[8];
2260 int buf_size;
2261 int reset, ret;
2262
2263 memset(buf, 0, sizeof(buf));
2264 buf_size = min(count, sizeof(buf) - 1);
2265 if (copy_from_user(buf, user_buf, buf_size))
2266 return -EFAULT;
2267 if (sscanf(buf, "%d", &reset) != 1)
2268 return -EINVAL;
2269 switch (reset) {
2270 case IWL_RF_RESET:
2271 case IWL_FW_RESET:
2272 ret = iwl_force_reset(priv, reset);
2273 break;
2274 default:
2275 return -EINVAL;
2276 }
2277 return ret ? ret : count;
2278}
2279
1848DEBUGFS_READ_FILE_OPS(rx_statistics); 2280DEBUGFS_READ_FILE_OPS(rx_statistics);
1849DEBUGFS_READ_FILE_OPS(tx_statistics); 2281DEBUGFS_READ_FILE_OPS(tx_statistics);
1850DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); 2282DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
@@ -1859,6 +2291,13 @@ DEBUGFS_READ_FILE_OPS(tx_power);
1859DEBUGFS_READ_FILE_OPS(power_save_status); 2291DEBUGFS_READ_FILE_OPS(power_save_status);
1860DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics); 2292DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
1861DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics); 2293DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
2294DEBUGFS_WRITE_FILE_OPS(csr);
2295DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
2296DEBUGFS_READ_FILE_OPS(fh_reg);
2297DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
2298DEBUGFS_WRITE_FILE_OPS(internal_scan);
2299DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
2300DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
1862 2301
1863/* 2302/*
1864 * Create the debugfs files and directories 2303 * Create the debugfs files and directories
@@ -1866,69 +2305,74 @@ DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
1866 */ 2305 */
1867int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) 2306int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
1868{ 2307{
1869 struct iwl_debugfs *dbgfs;
1870 struct dentry *phyd = priv->hw->wiphy->debugfsdir; 2308 struct dentry *phyd = priv->hw->wiphy->debugfsdir;
1871 int ret = 0; 2309 struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
1872 2310
1873 dbgfs = kzalloc(sizeof(struct iwl_debugfs), GFP_KERNEL); 2311 dir_drv = debugfs_create_dir(name, phyd);
1874 if (!dbgfs) { 2312 if (!dir_drv)
1875 ret = -ENOMEM; 2313 return -ENOMEM;
1876 goto err; 2314
1877 } 2315 priv->debugfs_dir = dir_drv;
1878 2316
1879 priv->dbgfs = dbgfs; 2317 dir_data = debugfs_create_dir("data", dir_drv);
1880 dbgfs->name = name; 2318 if (!dir_data)
1881 dbgfs->dir_drv = debugfs_create_dir(name, phyd); 2319 goto err;
1882 if (!dbgfs->dir_drv || IS_ERR(dbgfs->dir_drv)) { 2320 dir_rf = debugfs_create_dir("rf", dir_drv);
1883 ret = -ENOENT; 2321 if (!dir_rf)
2322 goto err;
2323 dir_debug = debugfs_create_dir("debug", dir_drv);
2324 if (!dir_debug)
1884 goto err; 2325 goto err;
1885 }
1886 2326
1887 DEBUGFS_ADD_DIR(data, dbgfs->dir_drv); 2327 DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
1888 DEBUGFS_ADD_DIR(rf, dbgfs->dir_drv); 2328 DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
1889 DEBUGFS_ADD_DIR(debug, dbgfs->dir_drv); 2329 DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR);
1890 DEBUGFS_ADD_FILE(nvm, data, S_IRUSR); 2330 DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
1891 DEBUGFS_ADD_FILE(sram, data, S_IWUSR | S_IRUSR); 2331 DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
1892 DEBUGFS_ADD_FILE(log_event, data, S_IWUSR); 2332 DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
1893 DEBUGFS_ADD_FILE(stations, data, S_IRUSR); 2333 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
1894 DEBUGFS_ADD_FILE(channels, data, S_IRUSR); 2334 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
1895 DEBUGFS_ADD_FILE(status, data, S_IRUSR); 2335 DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR);
1896 DEBUGFS_ADD_FILE(interrupt, data, S_IWUSR | S_IRUSR); 2336 DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR);
1897 DEBUGFS_ADD_FILE(qos, data, S_IRUSR); 2337 DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
1898 DEBUGFS_ADD_FILE(led, data, S_IRUSR); 2338 DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR);
1899 DEBUGFS_ADD_FILE(sleep_level_override, data, S_IWUSR | S_IRUSR); 2339 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
1900 DEBUGFS_ADD_FILE(current_sleep_command, data, S_IRUSR); 2340 DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
1901 DEBUGFS_ADD_FILE(thermal_throttling, data, S_IRUSR); 2341 DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
1902 DEBUGFS_ADD_FILE(disable_ht40, data, S_IWUSR | S_IRUSR); 2342 DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
1903 DEBUGFS_ADD_FILE(rx_statistics, debug, S_IRUSR); 2343 DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
1904 DEBUGFS_ADD_FILE(tx_statistics, debug, S_IRUSR); 2344 DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
1905 DEBUGFS_ADD_FILE(traffic_log, debug, S_IWUSR | S_IRUSR); 2345 DEBUGFS_ADD_FILE(tx_power, dir_debug, S_IRUSR);
1906 DEBUGFS_ADD_FILE(rx_queue, debug, S_IRUSR); 2346 DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
1907 DEBUGFS_ADD_FILE(tx_queue, debug, S_IRUSR); 2347 DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
1908 DEBUGFS_ADD_FILE(tx_power, debug, S_IRUSR); 2348 DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
1909 DEBUGFS_ADD_FILE(power_save_status, debug, S_IRUSR); 2349 DEBUGFS_ADD_FILE(csr, dir_debug, S_IWUSR);
1910 DEBUGFS_ADD_FILE(clear_ucode_statistics, debug, S_IWUSR); 2350 DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
1911 DEBUGFS_ADD_FILE(clear_traffic_statistics, debug, S_IWUSR); 2351 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
2352 DEBUGFS_ADD_FILE(internal_scan, dir_debug, S_IWUSR);
2353 DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
2354 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
1912 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) { 2355 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
1913 DEBUGFS_ADD_FILE(ucode_rx_stats, debug, S_IRUSR); 2356 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
1914 DEBUGFS_ADD_FILE(ucode_tx_stats, debug, S_IRUSR); 2357 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
1915 DEBUGFS_ADD_FILE(ucode_general_stats, debug, S_IRUSR); 2358 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
1916 DEBUGFS_ADD_FILE(sensitivity, debug, S_IRUSR); 2359 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
1917 DEBUGFS_ADD_FILE(chain_noise, debug, S_IRUSR); 2360 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
2361 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
1918 } 2362 }
1919 DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal); 2363 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, &priv->disable_sens_cal);
1920 DEBUGFS_ADD_BOOL(disable_chain_noise, rf, 2364 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
1921 &priv->disable_chain_noise_cal); 2365 &priv->disable_chain_noise_cal);
1922 if (((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) || 2366 if (((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) ||
1923 ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_3945)) 2367 ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_3945))
1924 DEBUGFS_ADD_BOOL(disable_tx_power, rf, 2368 DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
1925 &priv->disable_tx_power_cal); 2369 &priv->disable_tx_power_cal);
1926 return 0; 2370 return 0;
1927 2371
1928err: 2372err:
1929 IWL_ERR(priv, "Can't open the debugfs directory\n"); 2373 IWL_ERR(priv, "Can't create the debugfs directory\n");
1930 iwl_dbgfs_unregister(priv); 2374 iwl_dbgfs_unregister(priv);
1931 return ret; 2375 return -ENOMEM;
1932} 2376}
1933EXPORT_SYMBOL(iwl_dbgfs_register); 2377EXPORT_SYMBOL(iwl_dbgfs_register);
1934 2378
@@ -1938,56 +2382,11 @@ EXPORT_SYMBOL(iwl_dbgfs_register);
1938 */ 2382 */
1939void iwl_dbgfs_unregister(struct iwl_priv *priv) 2383void iwl_dbgfs_unregister(struct iwl_priv *priv)
1940{ 2384{
1941 if (!priv->dbgfs) 2385 if (!priv->debugfs_dir)
1942 return; 2386 return;
1943 2387
1944 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_sleep_level_override); 2388 debugfs_remove_recursive(priv->debugfs_dir);
1945 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_current_sleep_command); 2389 priv->debugfs_dir = NULL;
1946 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_nvm);
1947 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_sram);
1948 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_log_event);
1949 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_stations);
1950 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_channels);
1951 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_status);
1952 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_interrupt);
1953 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_qos);
1954 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_led);
1955 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_thermal_throttling);
1956 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_disable_ht40);
1957 DEBUGFS_REMOVE(priv->dbgfs->dir_data);
1958 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_rx_statistics);
1959 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_statistics);
1960 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_traffic_log);
1961 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_rx_queue);
1962 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_queue);
1963 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_power);
1964 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_power_save_status);
1965 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
1966 file_clear_ucode_statistics);
1967 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
1968 file_clear_traffic_statistics);
1969 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
1970 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
1971 file_ucode_rx_stats);
1972 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
1973 file_ucode_tx_stats);
1974 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
1975 file_ucode_general_stats);
1976 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
1977 file_sensitivity);
1978 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
1979 file_chain_noise);
1980 }
1981 DEBUGFS_REMOVE(priv->dbgfs->dir_debug);
1982 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity);
1983 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_chain_noise);
1984 if (((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) ||
1985 ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_3945))
1986 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_tx_power);
1987 DEBUGFS_REMOVE(priv->dbgfs->dir_rf);
1988 DEBUGFS_REMOVE(priv->dbgfs->dir_drv);
1989 kfree(priv->dbgfs);
1990 priv->dbgfs = NULL;
1991} 2390}
1992EXPORT_SYMBOL(iwl_dbgfs_unregister); 2391EXPORT_SYMBOL(iwl_dbgfs_unregister);
1993 2392
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 3822cf53e368..ab891b958042 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -512,6 +512,7 @@ struct iwl_ht_config {
512 bool is_ht; 512 bool is_ht;
513 bool is_40mhz; 513 bool is_40mhz;
514 bool single_chain_sufficient; 514 bool single_chain_sufficient;
515 enum ieee80211_smps_mode smps; /* current smps mode */
515 /* BSS related data */ 516 /* BSS related data */
516 u8 extension_chan_offset; 517 u8 extension_chan_offset;
517 u8 ht_protection; 518 u8 ht_protection;
@@ -984,6 +985,74 @@ struct iwl_switch_rxon {
984 __le16 channel; 985 __le16 channel;
985}; 986};
986 987
988/*
989 * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
990 * to perform continuous uCode event logging operation if enabled
991 */
992#define UCODE_TRACE_PERIOD (100)
993
994/*
995 * iwl_event_log: current uCode event log position
996 *
997 * @ucode_trace: enable/disable ucode continuous trace timer
998 * @num_wraps: how many times the event buffer wraps
999 * @next_entry: the entry just before the next one that uCode would fill
1000 * @non_wraps_count: counter for no wrap detected when dump ucode events
1001 * @wraps_once_count: counter for wrap once detected when dump ucode events
1002 * @wraps_more_count: counter for wrap more than once detected
1003 * when dump ucode events
1004 */
1005struct iwl_event_log {
1006 bool ucode_trace;
1007 u32 num_wraps;
1008 u32 next_entry;
1009 int non_wraps_count;
1010 int wraps_once_count;
1011 int wraps_more_count;
1012};
1013
1014/*
1015 * host interrupt timeout value
1016 * used with setting interrupt coalescing timer
1017 * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
1018 *
1019 * default interrupt coalescing timer is 64 x 32 = 2048 usecs
1020 * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
1021 */
1022#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
1023#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
1024#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
1025#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
1026#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
1027#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
1028
1029/*
1030 * This is the threshold value of plcp error rate per 100mSecs. It is
1031 * used to set and check for the validity of plcp_delta.
1032 */
1033#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (0)
1034#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50)
1035#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100)
1036#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200)
1037#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
1038
1039#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
1040#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
1041
1042enum iwl_reset {
1043 IWL_RF_RESET = 0,
1044 IWL_FW_RESET,
1045 IWL_MAX_FORCE_RESET,
1046};
1047
1048struct iwl_force_reset {
1049 int reset_request_count;
1050 int reset_success_count;
1051 int reset_reject_count;
1052 unsigned long reset_duration;
1053 unsigned long last_force_reset_jiffies;
1054};
1055
987struct iwl_priv { 1056struct iwl_priv {
988 1057
989 /* ieee device used by generic ieee processing code */ 1058 /* ieee device used by generic ieee processing code */
@@ -1004,13 +1073,19 @@ struct iwl_priv {
1004 1073
1005 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; 1074 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
1006 1075
1007#if defined(CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT) || defined(CONFIG_IWL3945_SPECTRUM_MEASUREMENT)
1008 /* spectrum measurement report caching */ 1076 /* spectrum measurement report caching */
1009 struct iwl_spectrum_notification measure_report; 1077 struct iwl_spectrum_notification measure_report;
1010 u8 measurement_status; 1078 u8 measurement_status;
1011#endif 1079
1012 /* ucode beacon time */ 1080 /* ucode beacon time */
1013 u32 ucode_beacon_time; 1081 u32 ucode_beacon_time;
1082 int missed_beacon_threshold;
1083
1084 /* storing the jiffies when the plcp error rate is received */
1085 unsigned long plcp_jiffies;
1086
1087 /* force reset */
1088 struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
1014 1089
1015 /* we allocate array of iwl4965_channel_info for NIC's valid channels. 1090 /* we allocate array of iwl4965_channel_info for NIC's valid channels.
1016 * Access via channel # using indirect index array */ 1091 * Access via channel # using indirect index array */
@@ -1029,7 +1104,6 @@ struct iwl_priv {
1029 struct iwl_calib_result calib_results[IWL_CALIB_MAX]; 1104 struct iwl_calib_result calib_results[IWL_CALIB_MAX];
1030 1105
1031 /* Scan related variables */ 1106 /* Scan related variables */
1032 unsigned long last_scan_jiffies;
1033 unsigned long next_scan_jiffies; 1107 unsigned long next_scan_jiffies;
1034 unsigned long scan_start; 1108 unsigned long scan_start;
1035 unsigned long scan_pass_start; 1109 unsigned long scan_pass_start;
@@ -1037,6 +1111,7 @@ struct iwl_priv {
1037 void *scan; 1111 void *scan;
1038 int scan_bands; 1112 int scan_bands;
1039 struct cfg80211_scan_request *scan_request; 1113 struct cfg80211_scan_request *scan_request;
1114 bool is_internal_short_scan;
1040 u8 scan_tx_ant[IEEE80211_NUM_BANDS]; 1115 u8 scan_tx_ant[IEEE80211_NUM_BANDS];
1041 u8 mgmt_tx_ant; 1116 u8 mgmt_tx_ant;
1042 1117
@@ -1045,6 +1120,7 @@ struct iwl_priv {
1045 spinlock_t hcmd_lock; /* protect hcmd */ 1120 spinlock_t hcmd_lock; /* protect hcmd */
1046 spinlock_t reg_lock; /* protect hw register access */ 1121 spinlock_t reg_lock; /* protect hw register access */
1047 struct mutex mutex; 1122 struct mutex mutex;
1123 struct mutex sync_cmd_mutex; /* enable serialization of sync commands */
1048 1124
1049 /* basic pci-network driver stuff */ 1125 /* basic pci-network driver stuff */
1050 struct pci_dev *pci_dev; 1126 struct pci_dev *pci_dev;
@@ -1135,6 +1211,8 @@ struct iwl_priv {
1135 struct iwl_notif_statistics statistics; 1211 struct iwl_notif_statistics statistics;
1136#ifdef CONFIG_IWLWIFI_DEBUG 1212#ifdef CONFIG_IWLWIFI_DEBUG
1137 struct iwl_notif_statistics accum_statistics; 1213 struct iwl_notif_statistics accum_statistics;
1214 struct iwl_notif_statistics delta_statistics;
1215 struct iwl_notif_statistics max_delta;
1138#endif 1216#endif
1139 1217
1140 /* context information */ 1218 /* context information */
@@ -1207,15 +1285,10 @@ struct iwl_priv {
1207 1285
1208 struct workqueue_struct *workqueue; 1286 struct workqueue_struct *workqueue;
1209 1287
1210 struct work_struct up;
1211 struct work_struct restart; 1288 struct work_struct restart;
1212 struct work_struct calibrated_work;
1213 struct work_struct scan_completed; 1289 struct work_struct scan_completed;
1214 struct work_struct rx_replenish; 1290 struct work_struct rx_replenish;
1215 struct work_struct abort_scan; 1291 struct work_struct abort_scan;
1216 struct work_struct update_link_led;
1217 struct work_struct auth_work;
1218 struct work_struct report_work;
1219 struct work_struct request_scan; 1292 struct work_struct request_scan;
1220 struct work_struct beacon_update; 1293 struct work_struct beacon_update;
1221 struct work_struct tt_work; 1294 struct work_struct tt_work;
@@ -1251,7 +1324,8 @@ struct iwl_priv {
1251 u16 rx_traffic_idx; 1324 u16 rx_traffic_idx;
1252 u8 *tx_traffic; 1325 u8 *tx_traffic;
1253 u8 *rx_traffic; 1326 u8 *rx_traffic;
1254 struct iwl_debugfs *dbgfs; 1327 struct dentry *debugfs_dir;
1328 u32 dbgfs_sram_offset, dbgfs_sram_len;
1255#endif /* CONFIG_IWLWIFI_DEBUGFS */ 1329#endif /* CONFIG_IWLWIFI_DEBUGFS */
1256#endif /* CONFIG_IWLWIFI_DEBUG */ 1330#endif /* CONFIG_IWLWIFI_DEBUG */
1257 1331
@@ -1261,6 +1335,7 @@ struct iwl_priv {
1261 u32 disable_tx_power_cal; 1335 u32 disable_tx_power_cal;
1262 struct work_struct run_time_calib_work; 1336 struct work_struct run_time_calib_work;
1263 struct timer_list statistics_periodic; 1337 struct timer_list statistics_periodic;
1338 struct timer_list ucode_trace;
1264 bool hw_ready; 1339 bool hw_ready;
1265 /*For 3945*/ 1340 /*For 3945*/
1266#define IWL_DEFAULT_TX_POWER 0x0F 1341#define IWL_DEFAULT_TX_POWER 0x0F
@@ -1268,6 +1343,8 @@ struct iwl_priv {
1268 struct iwl3945_notif_statistics statistics_39; 1343 struct iwl3945_notif_statistics statistics_39;
1269 1344
1270 u32 sta_supp_rates; 1345 u32 sta_supp_rates;
1346
1347 struct iwl_event_log event_log;
1271}; /*iwl_priv */ 1348}; /*iwl_priv */
1272 1349
1273static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id) 1350static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 83cc4e500a96..36580d8d8b8d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -37,4 +37,6 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32);
37EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx); 37EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx);
38EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event); 38EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
39EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error); 39EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
40EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
41EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event);
40#endif 42#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index d9c7363b1bbb..ff4d012ce260 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -91,6 +91,50 @@ TRACE_EVENT(iwlwifi_dev_iowrite32,
91); 91);
92 92
93#undef TRACE_SYSTEM 93#undef TRACE_SYSTEM
94#define TRACE_SYSTEM iwlwifi_ucode
95
96TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
97 TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
98 TP_ARGS(priv, time, data, ev),
99 TP_STRUCT__entry(
100 PRIV_ENTRY
101
102 __field(u32, time)
103 __field(u32, data)
104 __field(u32, ev)
105 ),
106 TP_fast_assign(
107 PRIV_ASSIGN;
108 __entry->time = time;
109 __entry->data = data;
110 __entry->ev = ev;
111 ),
112 TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
113 __entry->priv, __entry->time, __entry->data, __entry->ev)
114);
115
116TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
117 TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
118 TP_ARGS(priv, wraps, n_entry, p_entry),
119 TP_STRUCT__entry(
120 PRIV_ENTRY
121
122 __field(u32, wraps)
123 __field(u32, n_entry)
124 __field(u32, p_entry)
125 ),
126 TP_fast_assign(
127 PRIV_ASSIGN;
128 __entry->wraps = wraps;
129 __entry->n_entry = n_entry;
130 __entry->p_entry = p_entry;
131 ),
132 TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X",
133 __entry->priv, __entry->wraps, __entry->n_entry,
134 __entry->p_entry)
135);
136
137#undef TRACE_SYSTEM
94#define TRACE_SYSTEM iwlwifi 138#define TRACE_SYSTEM iwlwifi
95 139
96TRACE_EVENT(iwlwifi_dev_hcmd, 140TRACE_EVENT(iwlwifi_dev_hcmd,
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 4a30969689ff..fd37152abae3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 0cd9c02ee044..4e1ba824dc50 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 65fa8a69fd5a..113c3669b9ce 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -379,6 +379,25 @@
379 379
380#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010) 380#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
381 381
382/**
383 * Bit fields for TSSR(Tx Shared Status & Control) error status register:
384 * 31: Indicates an address error when accessed to internal memory
385 * uCode/driver must write "1" in order to clear this flag
386 * 30: Indicates that Host did not send the expected number of dwords to FH
387 * uCode/driver must write "1" in order to clear this flag
388 * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA
389 * command was received from the scheduler while the TRB was already full
390 * with previous command
391 * uCode/driver must write "1" in order to clear this flag
392 * 7-0: Each status bit indicates a channel's TxCredit error. When an error
393 * bit is set, it indicates that the FH has received a full indication
394 * from the RTC TxFIFO and the current value of the TxCredit counter was
395 * not equal to zero. This mean that the credit mechanism was not
396 * synchronized to the TxFIFO status
397 * uCode/driver must write "1" in order to clear this flag
398 */
399#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
400
382#define FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) ((1 << (_chnl)) << 24) 401#define FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) ((1 << (_chnl)) << 24)
383#define FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl) ((1 << (_chnl)) << 16) 402#define FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl) ((1 << (_chnl)) << 16)
384 403
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 30e9ea6d54ec..73681c4fefe7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -58,7 +58,6 @@ const char *get_cmd_string(u8 cmd)
58 IWL_CMD(COEX_PRIORITY_TABLE_CMD); 58 IWL_CMD(COEX_PRIORITY_TABLE_CMD);
59 IWL_CMD(COEX_MEDIUM_NOTIFICATION); 59 IWL_CMD(COEX_MEDIUM_NOTIFICATION);
60 IWL_CMD(COEX_EVENT_CMD); 60 IWL_CMD(COEX_EVENT_CMD);
61 IWL_CMD(RADAR_NOTIFICATION);
62 IWL_CMD(REPLY_QUIET_CMD); 61 IWL_CMD(REPLY_QUIET_CMD);
63 IWL_CMD(REPLY_CHANNEL_SWITCH); 62 IWL_CMD(REPLY_CHANNEL_SWITCH);
64 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION); 63 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
@@ -165,15 +164,13 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
165 /* A synchronous command can not have a callback set. */ 164 /* A synchronous command can not have a callback set. */
166 BUG_ON(cmd->callback); 165 BUG_ON(cmd->callback);
167 166
168 if (test_and_set_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status)) { 167 IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
169 IWL_ERR(priv,
170 "Error sending %s: Already sending a host command\n",
171 get_cmd_string(cmd->id)); 168 get_cmd_string(cmd->id));
172 ret = -EBUSY; 169 mutex_lock(&priv->sync_cmd_mutex);
173 goto out;
174 }
175 170
176 set_bit(STATUS_HCMD_ACTIVE, &priv->status); 171 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
172 IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s \n",
173 get_cmd_string(cmd->id));
177 174
178 cmd_idx = iwl_enqueue_hcmd(priv, cmd); 175 cmd_idx = iwl_enqueue_hcmd(priv, cmd);
179 if (cmd_idx < 0) { 176 if (cmd_idx < 0) {
@@ -194,6 +191,8 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
194 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 191 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
195 192
196 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 193 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
194 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n",
195 get_cmd_string(cmd->id));
197 ret = -ETIMEDOUT; 196 ret = -ETIMEDOUT;
198 goto cancel; 197 goto cancel;
199 } 198 }
@@ -238,7 +237,7 @@ fail:
238 cmd->reply_page = 0; 237 cmd->reply_page = 0;
239 } 238 }
240out: 239out:
241 clear_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status); 240 mutex_unlock(&priv->sync_cmd_mutex);
242 return ret; 241 return ret;
243} 242}
244EXPORT_SYMBOL(iwl_send_cmd_sync); 243EXPORT_SYMBOL(iwl_send_cmd_sync);
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index bd0b12efb5c7..51a67fb2e185 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -80,8 +80,8 @@ static inline void iwl_free_fw_desc(struct pci_dev *pci_dev,
80 struct fw_desc *desc) 80 struct fw_desc *desc)
81{ 81{
82 if (desc->v_addr) 82 if (desc->v_addr)
83 pci_free_consistent(pci_dev, desc->len, 83 dma_free_coherent(&pci_dev->dev, desc->len,
84 desc->v_addr, desc->p_addr); 84 desc->v_addr, desc->p_addr);
85 desc->v_addr = NULL; 85 desc->v_addr = NULL;
86 desc->len = 0; 86 desc->len = 0;
87} 87}
@@ -89,7 +89,8 @@ static inline void iwl_free_fw_desc(struct pci_dev *pci_dev,
89static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev, 89static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev,
90 struct fw_desc *desc) 90 struct fw_desc *desc)
91{ 91{
92 desc->v_addr = pci_alloc_consistent(pci_dev, desc->len, &desc->p_addr); 92 desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
93 &desc->p_addr, GFP_KERNEL);
93 return (desc->v_addr != NULL) ? 0 : -ENOMEM; 94 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
94} 95}
95 96
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index e552d4c4bdbe..c719baf2585a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project. 5 * Portions of this file are derived from the ipw3945 project.
6 * 6 *
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 46c7a95b88f0..a6f9c918aabc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index f47f053f02ea..49a70baa3fb6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 8ccc0bb1d9ed..1a1a9f081cc7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -303,13 +303,12 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
303 sizeof(struct iwl_powertable_cmd), cmd); 303 sizeof(struct iwl_powertable_cmd), cmd);
304} 304}
305 305
306 306/* priv->mutex must be held */
307int iwl_power_update_mode(struct iwl_priv *priv, bool force) 307int iwl_power_update_mode(struct iwl_priv *priv, bool force)
308{ 308{
309 int ret = 0; 309 int ret = 0;
310 struct iwl_tt_mgmt *tt = &priv->thermal_throttle; 310 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
311 bool enabled = (priv->iw_mode == NL80211_IFTYPE_STATION) && 311 bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS;
312 (priv->hw->conf.flags & IEEE80211_CONF_PS);
313 bool update_chains; 312 bool update_chains;
314 struct iwl_powertable_cmd cmd; 313 struct iwl_powertable_cmd cmd;
315 int dtimper; 314 int dtimper;
@@ -319,7 +318,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
319 priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE; 318 priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
320 319
321 if (priv->vif) 320 if (priv->vif)
322 dtimper = priv->vif->bss_conf.dtim_period; 321 dtimper = priv->hw->conf.ps_dtim_period;
323 else 322 else
324 dtimper = 1; 323 dtimper = 1;
325 324
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index 310c32e8f698..5db91c10dcc8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 6d95832db06d..d2d2a9174900 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 2dbce85404aa..df257bc15f49 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -123,12 +123,11 @@ EXPORT_SYMBOL(iwl_rx_queue_space);
123/** 123/**
124 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue 124 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
125 */ 125 */
126int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q) 126void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
127{ 127{
128 unsigned long flags; 128 unsigned long flags;
129 u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg; 129 u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
130 u32 reg; 130 u32 reg;
131 int ret = 0;
132 131
133 spin_lock_irqsave(&q->lock, flags); 132 spin_lock_irqsave(&q->lock, flags);
134 133
@@ -161,7 +160,6 @@ int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
161 160
162 exit_unlock: 161 exit_unlock:
163 spin_unlock_irqrestore(&q->lock, flags); 162 spin_unlock_irqrestore(&q->lock, flags);
164 return ret;
165} 163}
166EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr); 164EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
167/** 165/**
@@ -184,14 +182,13 @@ static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
184 * also updates the memory address in the firmware to reference the new 182 * also updates the memory address in the firmware to reference the new
185 * target buffer. 183 * target buffer.
186 */ 184 */
187int iwl_rx_queue_restock(struct iwl_priv *priv) 185void iwl_rx_queue_restock(struct iwl_priv *priv)
188{ 186{
189 struct iwl_rx_queue *rxq = &priv->rxq; 187 struct iwl_rx_queue *rxq = &priv->rxq;
190 struct list_head *element; 188 struct list_head *element;
191 struct iwl_rx_mem_buffer *rxb; 189 struct iwl_rx_mem_buffer *rxb;
192 unsigned long flags; 190 unsigned long flags;
193 int write; 191 int write;
194 int ret = 0;
195 192
196 spin_lock_irqsave(&rxq->lock, flags); 193 spin_lock_irqsave(&rxq->lock, flags);
197 write = rxq->write & ~0x7; 194 write = rxq->write & ~0x7;
@@ -220,10 +217,8 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
220 spin_lock_irqsave(&rxq->lock, flags); 217 spin_lock_irqsave(&rxq->lock, flags);
221 rxq->need_update = 1; 218 rxq->need_update = 1;
222 spin_unlock_irqrestore(&rxq->lock, flags); 219 spin_unlock_irqrestore(&rxq->lock, flags);
223 ret = iwl_rx_queue_update_write_ptr(priv, rxq); 220 iwl_rx_queue_update_write_ptr(priv, rxq);
224 } 221 }
225
226 return ret;
227} 222}
228EXPORT_SYMBOL(iwl_rx_queue_restock); 223EXPORT_SYMBOL(iwl_rx_queue_restock);
229 224
@@ -350,10 +345,10 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
350 } 345 }
351 } 346 }
352 347
353 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd, 348 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
354 rxq->dma_addr); 349 rxq->dma_addr);
355 pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status), 350 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
356 rxq->rb_stts, rxq->rb_stts_dma); 351 rxq->rb_stts, rxq->rb_stts_dma);
357 rxq->bd = NULL; 352 rxq->bd = NULL;
358 rxq->rb_stts = NULL; 353 rxq->rb_stts = NULL;
359} 354}
@@ -362,7 +357,7 @@ EXPORT_SYMBOL(iwl_rx_queue_free);
362int iwl_rx_queue_alloc(struct iwl_priv *priv) 357int iwl_rx_queue_alloc(struct iwl_priv *priv)
363{ 358{
364 struct iwl_rx_queue *rxq = &priv->rxq; 359 struct iwl_rx_queue *rxq = &priv->rxq;
365 struct pci_dev *dev = priv->pci_dev; 360 struct device *dev = &priv->pci_dev->dev;
366 int i; 361 int i;
367 362
368 spin_lock_init(&rxq->lock); 363 spin_lock_init(&rxq->lock);
@@ -370,12 +365,13 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
370 INIT_LIST_HEAD(&rxq->rx_used); 365 INIT_LIST_HEAD(&rxq->rx_used);
371 366
372 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ 367 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
373 rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr); 368 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr,
369 GFP_KERNEL);
374 if (!rxq->bd) 370 if (!rxq->bd)
375 goto err_bd; 371 goto err_bd;
376 372
377 rxq->rb_stts = pci_alloc_consistent(dev, sizeof(struct iwl_rb_status), 373 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
378 &rxq->rb_stts_dma); 374 &rxq->rb_stts_dma, GFP_KERNEL);
379 if (!rxq->rb_stts) 375 if (!rxq->rb_stts)
380 goto err_rb; 376 goto err_rb;
381 377
@@ -392,8 +388,8 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
392 return 0; 388 return 0;
393 389
394err_rb: 390err_rb:
395 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd, 391 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
396 rxq->dma_addr); 392 rxq->dma_addr);
397err_bd: 393err_bd:
398 return -ENOMEM; 394 return -ENOMEM;
399} 395}
@@ -473,8 +469,8 @@ int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
473 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| 469 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
474 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); 470 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
475 471
476 /* Set interrupt coalescing timer to 64 x 32 = 2048 usecs */ 472 /* Set interrupt coalescing timer to default (2048 usecs) */
477 iwl_write8(priv, CSR_INT_COALESCING, 0x40); 473 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
478 474
479 return 0; 475 return 0;
480} 476}
@@ -499,9 +495,10 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
499 struct iwl_missed_beacon_notif *missed_beacon; 495 struct iwl_missed_beacon_notif *missed_beacon;
500 496
501 missed_beacon = &pkt->u.missed_beacon; 497 missed_beacon = &pkt->u.missed_beacon;
502 if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) { 498 if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
499 priv->missed_beacon_threshold) {
503 IWL_DEBUG_CALIB(priv, "missed bcn cnsq %d totl %d rcd %d expctd %d\n", 500 IWL_DEBUG_CALIB(priv, "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
504 le32_to_cpu(missed_beacon->consequtive_missed_beacons), 501 le32_to_cpu(missed_beacon->consecutive_missed_beacons),
505 le32_to_cpu(missed_beacon->total_missed_becons), 502 le32_to_cpu(missed_beacon->total_missed_becons),
506 le32_to_cpu(missed_beacon->num_recvd_beacons), 503 le32_to_cpu(missed_beacon->num_recvd_beacons),
507 le32_to_cpu(missed_beacon->num_expected_beacons)); 504 le32_to_cpu(missed_beacon->num_expected_beacons));
@@ -511,6 +508,24 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
511} 508}
512EXPORT_SYMBOL(iwl_rx_missed_beacon_notif); 509EXPORT_SYMBOL(iwl_rx_missed_beacon_notif);
513 510
511void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
512 struct iwl_rx_mem_buffer *rxb)
513{
514 struct iwl_rx_packet *pkt = rxb_addr(rxb);
515 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
516
517 if (!report->state) {
518 IWL_DEBUG_11H(priv,
519 "Spectrum Measure Notification: Start\n");
520 return;
521 }
522
523 memcpy(&priv->measure_report, report, sizeof(*report));
524 priv->measurement_status |= MEASUREMENT_READY;
525}
526EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);
527
528
514 529
515/* Calculate noise level, based on measurements during network silence just 530/* Calculate noise level, based on measurements during network silence just
516 * before arriving beacon. This measurement can be done only if we know 531 * before arriving beacon. This measurement can be done only if we know
@@ -564,15 +579,24 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
564 int i; 579 int i;
565 __le32 *prev_stats; 580 __le32 *prev_stats;
566 u32 *accum_stats; 581 u32 *accum_stats;
582 u32 *delta, *max_delta;
567 583
568 prev_stats = (__le32 *)&priv->statistics; 584 prev_stats = (__le32 *)&priv->statistics;
569 accum_stats = (u32 *)&priv->accum_statistics; 585 accum_stats = (u32 *)&priv->accum_statistics;
586 delta = (u32 *)&priv->delta_statistics;
587 max_delta = (u32 *)&priv->max_delta;
570 588
571 for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics); 589 for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics);
572 i += sizeof(__le32), stats++, prev_stats++, accum_stats++) 590 i += sizeof(__le32), stats++, prev_stats++, delta++,
573 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) 591 max_delta++, accum_stats++) {
574 *accum_stats += (le32_to_cpu(*stats) - 592 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
593 *delta = (le32_to_cpu(*stats) -
575 le32_to_cpu(*prev_stats)); 594 le32_to_cpu(*prev_stats));
595 *accum_stats += *delta;
596 if (*delta > *max_delta)
597 *max_delta = *delta;
598 }
599 }
576 600
577 /* reset accumulative statistics for "no-counter" type statistics */ 601 /* reset accumulative statistics for "no-counter" type statistics */
578 priv->accum_statistics.general.temperature = 602 priv->accum_statistics.general.temperature =
@@ -592,11 +616,15 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
592 616
593#define REG_RECALIB_PERIOD (60) 617#define REG_RECALIB_PERIOD (60)
594 618
619#define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n"
595void iwl_rx_statistics(struct iwl_priv *priv, 620void iwl_rx_statistics(struct iwl_priv *priv,
596 struct iwl_rx_mem_buffer *rxb) 621 struct iwl_rx_mem_buffer *rxb)
597{ 622{
598 int change; 623 int change;
599 struct iwl_rx_packet *pkt = rxb_addr(rxb); 624 struct iwl_rx_packet *pkt = rxb_addr(rxb);
625 int combined_plcp_delta;
626 unsigned int plcp_msec;
627 unsigned long plcp_received_jiffies;
600 628
601 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", 629 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
602 (int)sizeof(priv->statistics), 630 (int)sizeof(priv->statistics),
@@ -611,6 +639,56 @@ void iwl_rx_statistics(struct iwl_priv *priv,
611#ifdef CONFIG_IWLWIFI_DEBUG 639#ifdef CONFIG_IWLWIFI_DEBUG
612 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); 640 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
613#endif 641#endif
642 /*
643 * check for plcp_err and trigger radio reset if it exceeds
644 * the plcp error threshold plcp_delta.
645 */
646 plcp_received_jiffies = jiffies;
647 plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
648 (long) priv->plcp_jiffies);
649 priv->plcp_jiffies = plcp_received_jiffies;
650 /*
651 * check to make sure plcp_msec is not 0 to prevent division
652 * by zero.
653 */
654 if (plcp_msec) {
655 combined_plcp_delta =
656 (le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err) -
657 le32_to_cpu(priv->statistics.rx.ofdm.plcp_err)) +
658 (le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err) -
659 le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err));
660
661 if ((combined_plcp_delta > 0) &&
662 ((combined_plcp_delta * 100) / plcp_msec) >
663 priv->cfg->plcp_delta_threshold) {
664 /*
665 * if plcp_err exceed the threshold, the following
666 * data is printed in csv format:
667 * Text: plcp_err exceeded %d,
668 * Received ofdm.plcp_err,
669 * Current ofdm.plcp_err,
670 * Received ofdm_ht.plcp_err,
671 * Current ofdm_ht.plcp_err,
672 * combined_plcp_delta,
673 * plcp_msec
674 */
675 IWL_DEBUG_RADIO(priv, PLCP_MSG,
676 priv->cfg->plcp_delta_threshold,
677 le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err),
678 le32_to_cpu(priv->statistics.rx.ofdm.plcp_err),
679 le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err),
680 le32_to_cpu(
681 priv->statistics.rx.ofdm_ht.plcp_err),
682 combined_plcp_delta, plcp_msec);
683
684 /*
685 * Reset the RF radio due to the high plcp
686 * error rate
687 */
688 iwl_force_reset(priv, IWL_RF_RESET);
689 }
690 }
691
614 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics)); 692 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
615 693
616 set_bit(STATUS_STATISTICS, &priv->status); 694 set_bit(STATUS_STATISTICS, &priv->status);
@@ -638,11 +716,13 @@ void iwl_reply_statistics(struct iwl_priv *priv,
638 struct iwl_rx_packet *pkt = rxb_addr(rxb); 716 struct iwl_rx_packet *pkt = rxb_addr(rxb);
639 717
640 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) { 718 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
641 memset(&priv->statistics, 0,
642 sizeof(struct iwl_notif_statistics));
643#ifdef CONFIG_IWLWIFI_DEBUG 719#ifdef CONFIG_IWLWIFI_DEBUG
644 memset(&priv->accum_statistics, 0, 720 memset(&priv->accum_statistics, 0,
645 sizeof(struct iwl_notif_statistics)); 721 sizeof(struct iwl_notif_statistics));
722 memset(&priv->delta_statistics, 0,
723 sizeof(struct iwl_notif_statistics));
724 memset(&priv->max_delta, 0,
725 sizeof(struct iwl_notif_statistics));
646#endif 726#endif
647 IWL_DEBUG_RX(priv, "Statistics have been cleared\n"); 727 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
648 } 728 }
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index fa1c89ba6459..dd9ff2ed645a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -192,19 +192,17 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
192 IWL_DEBUG_SCAN(priv, "Scan ch.res: " 192 IWL_DEBUG_SCAN(priv, "Scan ch.res: "
193 "%d [802.11%s] " 193 "%d [802.11%s] "
194 "(TSF: 0x%08X:%08X) - %d " 194 "(TSF: 0x%08X:%08X) - %d "
195 "elapsed=%lu usec (%dms since last)\n", 195 "elapsed=%lu usec\n",
196 notif->channel, 196 notif->channel,
197 notif->band ? "bg" : "a", 197 notif->band ? "bg" : "a",
198 le32_to_cpu(notif->tsf_high), 198 le32_to_cpu(notif->tsf_high),
199 le32_to_cpu(notif->tsf_low), 199 le32_to_cpu(notif->tsf_low),
200 le32_to_cpu(notif->statistics[0]), 200 le32_to_cpu(notif->statistics[0]),
201 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf, 201 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
202 jiffies_to_msecs(elapsed_jiffies
203 (priv->last_scan_jiffies, jiffies)));
204#endif 202#endif
205 203
206 priv->last_scan_jiffies = jiffies; 204 if (!priv->is_internal_short_scan)
207 priv->next_scan_jiffies = 0; 205 priv->next_scan_jiffies = 0;
208} 206}
209 207
210/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ 208/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
@@ -250,8 +248,9 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
250 goto reschedule; 248 goto reschedule;
251 } 249 }
252 250
253 priv->last_scan_jiffies = jiffies; 251 if (!priv->is_internal_short_scan)
254 priv->next_scan_jiffies = 0; 252 priv->next_scan_jiffies = 0;
253
255 IWL_DEBUG_INFO(priv, "Setting scan to off\n"); 254 IWL_DEBUG_INFO(priv, "Setting scan to off\n");
256 255
257 clear_bit(STATUS_SCANNING, &priv->status); 256 clear_bit(STATUS_SCANNING, &priv->status);
@@ -314,6 +313,72 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
314} 313}
315EXPORT_SYMBOL(iwl_get_passive_dwell_time); 314EXPORT_SYMBOL(iwl_get_passive_dwell_time);
316 315
316static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
317 enum ieee80211_band band,
318 struct iwl_scan_channel *scan_ch)
319{
320 const struct ieee80211_supported_band *sband;
321 const struct iwl_channel_info *ch_info;
322 u16 passive_dwell = 0;
323 u16 active_dwell = 0;
324 int i, added = 0;
325 u16 channel = 0;
326
327 sband = iwl_get_hw_mode(priv, band);
328 if (!sband) {
329 IWL_ERR(priv, "invalid band\n");
330 return added;
331 }
332
333 active_dwell = iwl_get_active_dwell_time(priv, band, 0);
334 passive_dwell = iwl_get_passive_dwell_time(priv, band);
335
336 if (passive_dwell <= active_dwell)
337 passive_dwell = active_dwell + 1;
338
339 /* only scan single channel, good enough to reset the RF */
340 /* pick the first valid not in-use channel */
341 if (band == IEEE80211_BAND_5GHZ) {
342 for (i = 14; i < priv->channel_count; i++) {
343 if (priv->channel_info[i].channel !=
344 le16_to_cpu(priv->staging_rxon.channel)) {
345 channel = priv->channel_info[i].channel;
346 ch_info = iwl_get_channel_info(priv,
347 band, channel);
348 if (is_channel_valid(ch_info))
349 break;
350 }
351 }
352 } else {
353 for (i = 0; i < 14; i++) {
354 if (priv->channel_info[i].channel !=
355 le16_to_cpu(priv->staging_rxon.channel)) {
356 channel =
357 priv->channel_info[i].channel;
358 ch_info = iwl_get_channel_info(priv,
359 band, channel);
360 if (is_channel_valid(ch_info))
361 break;
362 }
363 }
364 }
365 if (channel) {
366 scan_ch->channel = cpu_to_le16(channel);
367 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
368 scan_ch->active_dwell = cpu_to_le16(active_dwell);
369 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
370 /* Set txpower levels to defaults */
371 scan_ch->dsp_atten = 110;
372 if (band == IEEE80211_BAND_5GHZ)
373 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
374 else
375 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
376 added++;
377 } else
378 IWL_ERR(priv, "no valid channel found\n");
379 return added;
380}
381
317static int iwl_get_channels_for_scan(struct iwl_priv *priv, 382static int iwl_get_channels_for_scan(struct iwl_priv *priv,
318 enum ieee80211_band band, 383 enum ieee80211_band band,
319 u8 is_active, u8 n_probes, 384 u8 is_active, u8 n_probes,
@@ -404,23 +469,9 @@ EXPORT_SYMBOL(iwl_init_scan_params);
404 469
405static int iwl_scan_initiate(struct iwl_priv *priv) 470static int iwl_scan_initiate(struct iwl_priv *priv)
406{ 471{
407 if (!iwl_is_ready_rf(priv)) {
408 IWL_DEBUG_SCAN(priv, "Aborting scan due to not ready.\n");
409 return -EIO;
410 }
411
412 if (test_bit(STATUS_SCANNING, &priv->status)) {
413 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
414 return -EAGAIN;
415 }
416
417 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
418 IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
419 return -EAGAIN;
420 }
421
422 IWL_DEBUG_INFO(priv, "Starting scan...\n"); 472 IWL_DEBUG_INFO(priv, "Starting scan...\n");
423 set_bit(STATUS_SCANNING, &priv->status); 473 set_bit(STATUS_SCANNING, &priv->status);
474 priv->is_internal_short_scan = false;
424 priv->scan_start = jiffies; 475 priv->scan_start = jiffies;
425 priv->scan_pass_start = priv->scan_start; 476 priv->scan_pass_start = priv->scan_start;
426 477
@@ -449,6 +500,18 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
449 goto out_unlock; 500 goto out_unlock;
450 } 501 }
451 502
503 if (test_bit(STATUS_SCANNING, &priv->status)) {
504 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
505 ret = -EAGAIN;
506 goto out_unlock;
507 }
508
509 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
510 IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
511 ret = -EAGAIN;
512 goto out_unlock;
513 }
514
452 /* We don't schedule scan within next_scan_jiffies period. 515 /* We don't schedule scan within next_scan_jiffies period.
453 * Avoid scanning during possible EAPOL exchange, return 516 * Avoid scanning during possible EAPOL exchange, return
454 * success immediately. 517 * success immediately.
@@ -461,15 +524,6 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
461 goto out_unlock; 524 goto out_unlock;
462 } 525 }
463 526
464 /* if we just finished scan ask for delay */
465 if (iwl_is_associated(priv) && priv->last_scan_jiffies &&
466 time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN, jiffies)) {
467 IWL_DEBUG_SCAN(priv, "scan rejected: within previous scan period\n");
468 queue_work(priv->workqueue, &priv->scan_completed);
469 ret = 0;
470 goto out_unlock;
471 }
472
473 priv->scan_bands = 0; 527 priv->scan_bands = 0;
474 for (i = 0; i < req->n_channels; i++) 528 for (i = 0; i < req->n_channels; i++)
475 priv->scan_bands |= BIT(req->channels[i]->band); 529 priv->scan_bands |= BIT(req->channels[i]->band);
@@ -488,6 +542,46 @@ out_unlock:
488} 542}
489EXPORT_SYMBOL(iwl_mac_hw_scan); 543EXPORT_SYMBOL(iwl_mac_hw_scan);
490 544
545/*
546 * internal short scan, this function should only been called while associated.
547 * It will reset and tune the radio to prevent possible RF related problem
548 */
549int iwl_internal_short_hw_scan(struct iwl_priv *priv)
550{
551 int ret = 0;
552
553 if (!iwl_is_ready_rf(priv)) {
554 ret = -EIO;
555 IWL_DEBUG_SCAN(priv, "not ready or exit pending\n");
556 goto out;
557 }
558 if (test_bit(STATUS_SCANNING, &priv->status)) {
559 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
560 ret = -EAGAIN;
561 goto out;
562 }
563 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
564 IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
565 ret = -EAGAIN;
566 goto out;
567 }
568
569 priv->scan_bands = 0;
570 if (priv->band == IEEE80211_BAND_5GHZ)
571 priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
572 else
573 priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
574
575 IWL_DEBUG_SCAN(priv, "Start internal short scan...\n");
576 set_bit(STATUS_SCANNING, &priv->status);
577 priv->is_internal_short_scan = true;
578 queue_work(priv->workqueue, &priv->request_scan);
579
580out:
581 return ret;
582}
583EXPORT_SYMBOL(iwl_internal_short_hw_scan);
584
491#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) 585#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
492 586
493void iwl_bg_scan_check(struct work_struct *data) 587void iwl_bg_scan_check(struct work_struct *data)
@@ -544,14 +638,26 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
544 if (left < 0) 638 if (left < 0)
545 return 0; 639 return 0;
546 *pos++ = WLAN_EID_SSID; 640 *pos++ = WLAN_EID_SSID;
547 *pos++ = 0; 641 if (!priv->is_internal_short_scan &&
548 642 priv->scan_request->n_ssids) {
549 len += 2; 643 struct cfg80211_ssid *ssid =
644 priv->scan_request->ssids;
645
646 /* Broadcast if ssid_len is 0 */
647 *pos++ = ssid->ssid_len;
648 memcpy(pos, ssid->ssid, ssid->ssid_len);
649 pos += ssid->ssid_len;
650 len += 2 + ssid->ssid_len;
651 } else {
652 *pos++ = 0;
653 len += 2;
654 }
550 655
551 if (WARN_ON(left < ie_len)) 656 if (WARN_ON(left < ie_len))
552 return len; 657 return len;
553 658
554 memcpy(pos, ies, ie_len); 659 if (ies)
660 memcpy(pos, ies, ie_len);
555 len += ie_len; 661 len += ie_len;
556 left -= ie_len; 662 left -= ie_len;
557 663
@@ -654,7 +760,6 @@ static void iwl_bg_request_scan(struct work_struct *data)
654 unsigned long flags; 760 unsigned long flags;
655 761
656 IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); 762 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
657
658 spin_lock_irqsave(&priv->lock, flags); 763 spin_lock_irqsave(&priv->lock, flags);
659 interval = priv->beacon_int; 764 interval = priv->beacon_int;
660 spin_unlock_irqrestore(&priv->lock, flags); 765 spin_unlock_irqrestore(&priv->lock, flags);
@@ -672,21 +777,29 @@ static void iwl_bg_request_scan(struct work_struct *data)
672 scan_suspend_time, interval); 777 scan_suspend_time, interval);
673 } 778 }
674 779
675 if (priv->scan_request->n_ssids) { 780 if (priv->is_internal_short_scan) {
676 int i, p = 0; 781 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
782 } else if (priv->scan_request->n_ssids) {
677 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); 783 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
678 for (i = 0; i < priv->scan_request->n_ssids; i++) { 784 /*
679 /* always does wildcard anyway */ 785 * The first SSID to scan is stuffed into the probe request
680 if (!priv->scan_request->ssids[i].ssid_len) 786 * template and the remaining ones are handled through the
681 continue; 787 * direct_scan array.
682 scan->direct_scan[p].id = WLAN_EID_SSID; 788 */
683 scan->direct_scan[p].len = 789 if (priv->scan_request->n_ssids > 1) {
684 priv->scan_request->ssids[i].ssid_len; 790 int i, p = 0;
685 memcpy(scan->direct_scan[p].ssid, 791 for (i = 1; i < priv->scan_request->n_ssids; i++) {
686 priv->scan_request->ssids[i].ssid, 792 if (!priv->scan_request->ssids[i].ssid_len)
687 priv->scan_request->ssids[i].ssid_len); 793 continue;
688 n_probes++; 794 scan->direct_scan[p].id = WLAN_EID_SSID;
689 p++; 795 scan->direct_scan[p].len =
796 priv->scan_request->ssids[i].ssid_len;
797 memcpy(scan->direct_scan[p].ssid,
798 priv->scan_request->ssids[i].ssid,
799 priv->scan_request->ssids[i].ssid_len);
800 n_probes++;
801 p++;
802 }
690 } 803 }
691 is_active = true; 804 is_active = true;
692 } else 805 } else
@@ -753,24 +866,38 @@ static void iwl_bg_request_scan(struct work_struct *data)
753 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; 866 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
754 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; 867 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
755 scan->rx_chain = cpu_to_le16(rx_chain); 868 scan->rx_chain = cpu_to_le16(rx_chain);
756 cmd_len = iwl_fill_probe_req(priv, 869 if (!priv->is_internal_short_scan) {
757 (struct ieee80211_mgmt *)scan->data, 870 cmd_len = iwl_fill_probe_req(priv,
758 priv->scan_request->ie, 871 (struct ieee80211_mgmt *)scan->data,
759 priv->scan_request->ie_len, 872 priv->scan_request->ie,
760 IWL_MAX_SCAN_SIZE - sizeof(*scan)); 873 priv->scan_request->ie_len,
874 IWL_MAX_SCAN_SIZE - sizeof(*scan));
875 } else {
876 cmd_len = iwl_fill_probe_req(priv,
877 (struct ieee80211_mgmt *)scan->data,
878 NULL, 0,
879 IWL_MAX_SCAN_SIZE - sizeof(*scan));
761 880
881 }
762 scan->tx_cmd.len = cpu_to_le16(cmd_len); 882 scan->tx_cmd.len = cpu_to_le16(cmd_len);
763
764 if (iwl_is_monitor_mode(priv)) 883 if (iwl_is_monitor_mode(priv))
765 scan->filter_flags = RXON_FILTER_PROMISC_MSK; 884 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
766 885
767 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK | 886 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
768 RXON_FILTER_BCON_AWARE_MSK); 887 RXON_FILTER_BCON_AWARE_MSK);
769 888
770 scan->channel_count = 889 if (priv->is_internal_short_scan) {
771 iwl_get_channels_for_scan(priv, band, is_active, n_probes, 890 scan->channel_count =
772 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); 891 iwl_get_single_channel_for_scan(priv, band,
773 892 (void *)&scan->data[le16_to_cpu(
893 scan->tx_cmd.len)]);
894 } else {
895 scan->channel_count =
896 iwl_get_channels_for_scan(priv, band,
897 is_active, n_probes,
898 (void *)&scan->data[le16_to_cpu(
899 scan->tx_cmd.len)]);
900 }
774 if (scan->channel_count == 0) { 901 if (scan->channel_count == 0) {
775 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); 902 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
776 goto done; 903 goto done;
@@ -831,7 +958,12 @@ void iwl_bg_scan_completed(struct work_struct *work)
831 958
832 cancel_delayed_work(&priv->scan_check); 959 cancel_delayed_work(&priv->scan_check);
833 960
834 ieee80211_scan_completed(priv->hw, false); 961 if (!priv->is_internal_short_scan)
962 ieee80211_scan_completed(priv->hw, false);
963 else {
964 priv->is_internal_short_scan = false;
965 IWL_DEBUG_SCAN(priv, "internal short scan completed\n");
966 }
835 967
836 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 968 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
837 return; 969 return;
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.c b/drivers/net/wireless/iwlwifi/iwl-spectrum.c
deleted file mode 100644
index 1ea5cd345fe8..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.c
+++ /dev/null
@@ -1,198 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/pci.h>
34#include <linux/delay.h>
35#include <linux/skbuff.h>
36#include <linux/netdevice.h>
37#include <linux/wireless.h>
38
39#include <net/mac80211.h>
40
41#include "iwl-eeprom.h"
42#include "iwl-dev.h"
43#include "iwl-core.h"
44#include "iwl-io.h"
45#include "iwl-spectrum.h"
46
47#define BEACON_TIME_MASK_LOW 0x00FFFFFF
48#define BEACON_TIME_MASK_HIGH 0xFF000000
49#define TIME_UNIT 1024
50
51/*
52 * extended beacon time format
53 * time in usec will be changed into a 32-bit value in 8:24 format
54 * the high 1 byte is the beacon counts
55 * the lower 3 bytes is the time in usec within one beacon interval
56 */
57
58/* TOOD: was used in sysfs debug interface need to add to mac */
59#if 0
60static u32 iwl_usecs_to_beacons(u32 usec, u32 beacon_interval)
61{
62 u32 quot;
63 u32 rem;
64 u32 interval = beacon_interval * 1024;
65
66 if (!interval || !usec)
67 return 0;
68
69 quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
70 rem = (usec % interval) & BEACON_TIME_MASK_LOW;
71
72 return (quot << 24) + rem;
73}
74
75/* base is usually what we get from ucode with each received frame,
76 * the same as HW timer counter counting down
77 */
78
79static __le32 iwl_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
80{
81 u32 base_low = base & BEACON_TIME_MASK_LOW;
82 u32 addon_low = addon & BEACON_TIME_MASK_LOW;
83 u32 interval = beacon_interval * TIME_UNIT;
84 u32 res = (base & BEACON_TIME_MASK_HIGH) +
85 (addon & BEACON_TIME_MASK_HIGH);
86
87 if (base_low > addon_low)
88 res += base_low - addon_low;
89 else if (base_low < addon_low) {
90 res += interval + base_low - addon_low;
91 res += (1 << 24);
92 } else
93 res += (1 << 24);
94
95 return cpu_to_le32(res);
96}
97static int iwl_get_measurement(struct iwl_priv *priv,
98 struct ieee80211_measurement_params *params,
99 u8 type)
100{
101 struct iwl4965_spectrum_cmd spectrum;
102 struct iwl_rx_packet *res;
103 struct iwl_host_cmd cmd = {
104 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
105 .data = (void *)&spectrum,
106 .meta.flags = CMD_WANT_SKB,
107 };
108 u32 add_time = le64_to_cpu(params->start_time);
109 int rc;
110 int spectrum_resp_status;
111 int duration = le16_to_cpu(params->duration);
112
113 if (iwl_is_associated(priv))
114 add_time =
115 iwl_usecs_to_beacons(
116 le64_to_cpu(params->start_time) - priv->last_tsf,
117 le16_to_cpu(priv->rxon_timing.beacon_interval));
118
119 memset(&spectrum, 0, sizeof(spectrum));
120
121 spectrum.channel_count = cpu_to_le16(1);
122 spectrum.flags =
123 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
124 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
125 cmd.len = sizeof(spectrum);
126 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
127
128 if (iwl_is_associated(priv))
129 spectrum.start_time =
130 iwl_add_beacon_time(priv->last_beacon_time,
131 add_time,
132 le16_to_cpu(priv->rxon_timing.beacon_interval));
133 else
134 spectrum.start_time = 0;
135
136 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
137 spectrum.channels[0].channel = params->channel;
138 spectrum.channels[0].type = type;
139 if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
140 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
141 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
142
143 rc = iwl_send_cmd_sync(priv, &cmd);
144 if (rc)
145 return rc;
146
147 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
148 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
149 IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
150 rc = -EIO;
151 }
152
153 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
154 switch (spectrum_resp_status) {
155 case 0: /* Command will be handled */
156 if (res->u.spectrum.id != 0xff) {
157 IWL_DEBUG_INFO(priv,
158 "Replaced existing measurement: %d\n",
159 res->u.spectrum.id);
160 priv->measurement_status &= ~MEASUREMENT_READY;
161 }
162 priv->measurement_status |= MEASUREMENT_ACTIVE;
163 rc = 0;
164 break;
165
166 case 1: /* Command will not be handled */
167 rc = -EAGAIN;
168 break;
169 }
170
171 dev_kfree_skb_any(cmd.meta.u.skb);
172
173 return rc;
174}
175#endif
176
177static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
178 struct iwl_rx_mem_buffer *rxb)
179{
180 struct iwl_rx_packet *pkt = rxb_addr(rxb);
181 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
182
183 if (!report->state) {
184 IWL_DEBUG_11H(priv,
185 "Spectrum Measure Notification: Start\n");
186 return;
187 }
188
189 memcpy(&priv->measure_report, report, sizeof(*report));
190 priv->measurement_status |= MEASUREMENT_READY;
191}
192
193void iwl_setup_spectrum_handlers(struct iwl_priv *priv)
194{
195 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
196 iwl_rx_spectrum_measure_notif;
197}
198EXPORT_SYMBOL(iwl_setup_spectrum_handlers);
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.h b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
index a77c1e619062..af6babee2891 100644
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.h
+++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ieee80211 subsystem header files. 5 * Portions of this file are derived from the ieee80211 subsystem header files.
6 * 6 *
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 90fbdb25399e..4a6686fa6b36 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -80,46 +80,103 @@ int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
80} 80}
81EXPORT_SYMBOL(iwl_get_ra_sta_id); 81EXPORT_SYMBOL(iwl_get_ra_sta_id);
82 82
83/* priv->sta_lock must be held */
83static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) 84static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
84{ 85{
85 unsigned long flags;
86
87 spin_lock_irqsave(&priv->sta_lock, flags);
88 86
89 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) 87 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
90 IWL_ERR(priv, "ACTIVATE a non DRIVER active station %d\n", 88 IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u addr %pM\n",
91 sta_id); 89 sta_id, priv->stations[sta_id].sta.sta.addr);
92
93 priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
94 IWL_DEBUG_ASSOC(priv, "Added STA to Ucode: %pM\n",
95 priv->stations[sta_id].sta.sta.addr);
96 90
97 spin_unlock_irqrestore(&priv->sta_lock, flags); 91 if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
92 IWL_DEBUG_ASSOC(priv,
93 "STA id %u addr %pM already present in uCode (according to driver)\n",
94 sta_id, priv->stations[sta_id].sta.sta.addr);
95 } else {
96 priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
97 IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
98 sta_id, priv->stations[sta_id].sta.sta.addr);
99 }
98} 100}
99 101
100static void iwl_add_sta_callback(struct iwl_priv *priv, 102static void iwl_process_add_sta_resp(struct iwl_priv *priv,
101 struct iwl_device_cmd *cmd, 103 struct iwl_addsta_cmd *addsta,
102 struct iwl_rx_packet *pkt) 104 struct iwl_rx_packet *pkt,
105 bool sync)
103{ 106{
104 struct iwl_addsta_cmd *addsta =
105 (struct iwl_addsta_cmd *)cmd->cmd.payload;
106 u8 sta_id = addsta->sta.sta_id; 107 u8 sta_id = addsta->sta.sta_id;
108 unsigned long flags;
107 109
108 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { 110 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
109 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", 111 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
110 pkt->hdr.flags); 112 pkt->hdr.flags);
111 return; 113 return;
112 } 114 }
113 115
116 IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
117 sta_id);
118
119 spin_lock_irqsave(&priv->sta_lock, flags);
120
114 switch (pkt->u.add_sta.status) { 121 switch (pkt->u.add_sta.status) {
115 case ADD_STA_SUCCESS_MSK: 122 case ADD_STA_SUCCESS_MSK:
123 IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
116 iwl_sta_ucode_activate(priv, sta_id); 124 iwl_sta_ucode_activate(priv, sta_id);
117 /* fall through */ 125 break;
126 case ADD_STA_NO_ROOM_IN_TABLE:
127 IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
128 sta_id);
129 break;
130 case ADD_STA_NO_BLOCK_ACK_RESOURCE:
131 IWL_ERR(priv, "Adding station %d failed, no block ack resource.\n",
132 sta_id);
133 break;
134 case ADD_STA_MODIFY_NON_EXIST_STA:
135 IWL_ERR(priv, "Attempting to modify non-existing station %d \n",
136 sta_id);
137 break;
118 default: 138 default:
119 IWL_DEBUG_HC(priv, "Received REPLY_ADD_STA:(0x%08X)\n", 139 IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
120 pkt->u.add_sta.status); 140 pkt->u.add_sta.status);
121 break; 141 break;
122 } 142 }
143
144 IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
145 priv->stations[sta_id].sta.mode ==
146 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
147 sta_id, priv->stations[sta_id].sta.sta.addr);
148
149 /*
150 * XXX: The MAC address in the command buffer is often changed from
151 * the original sent to the device. That is, the MAC address
152 * written to the command buffer often is not the same MAC adress
153 * read from the command buffer when the command returns. This
154 * issue has not yet been resolved and this debugging is left to
155 * observe the problem.
156 */
157 IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
158 priv->stations[sta_id].sta.mode ==
159 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
160 addsta->sta.addr);
161
162 /*
163 * Determine if we wanted to modify or add a station,
164 * if adding a station succeeded we have some more initialization
165 * to do when using station notification. TODO
166 */
167
168 spin_unlock_irqrestore(&priv->sta_lock, flags);
169}
170
171static void iwl_add_sta_callback(struct iwl_priv *priv,
172 struct iwl_device_cmd *cmd,
173 struct iwl_rx_packet *pkt)
174{
175 struct iwl_addsta_cmd *addsta =
176 (struct iwl_addsta_cmd *)cmd->cmd.payload;
177
178 iwl_process_add_sta_resp(priv, addsta, pkt, false);
179
123} 180}
124 181
125int iwl_send_add_sta(struct iwl_priv *priv, 182int iwl_send_add_sta(struct iwl_priv *priv,
@@ -145,24 +202,9 @@ int iwl_send_add_sta(struct iwl_priv *priv,
145 if (ret || (flags & CMD_ASYNC)) 202 if (ret || (flags & CMD_ASYNC))
146 return ret; 203 return ret;
147 204
148 pkt = (struct iwl_rx_packet *)cmd.reply_page;
149 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
150 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
151 pkt->hdr.flags);
152 ret = -EIO;
153 }
154
155 if (ret == 0) { 205 if (ret == 0) {
156 switch (pkt->u.add_sta.status) { 206 pkt = (struct iwl_rx_packet *)cmd.reply_page;
157 case ADD_STA_SUCCESS_MSK: 207 iwl_process_add_sta_resp(priv, sta, pkt, true);
158 iwl_sta_ucode_activate(priv, sta->sta.sta_id);
159 IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
160 break;
161 default:
162 ret = -EIO;
163 IWL_WARN(priv, "REPLY_ADD_STA failed\n");
164 break;
165 }
166 } 208 }
167 iwl_free_pages(priv, cmd.reply_page); 209 iwl_free_pages(priv, cmd.reply_page);
168 210
@@ -1003,24 +1045,19 @@ int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
1003 struct ieee80211_sta_ht_cap *cur_ht_config = NULL; 1045 struct ieee80211_sta_ht_cap *cur_ht_config = NULL;
1004 u8 sta_id; 1046 u8 sta_id;
1005 1047
1006 /* Add station to device's station table */
1007
1008 /* 1048 /*
1009 * XXX: This check is definitely not correct, if we're an AP 1049 * Set HT capabilities. It is ok to set this struct even if not using
1010 * it'll always be false which is not what we want, but 1050 * HT config: the priv->current_ht_config.is_ht flag will just be false
1011 * it doesn't look like iwlagn is prepared to be an HT
1012 * AP anyway.
1013 */ 1051 */
1014 if (priv->current_ht_config.is_ht) { 1052 rcu_read_lock();
1015 rcu_read_lock(); 1053 sta = ieee80211_find_sta(priv->vif, addr);
1016 sta = ieee80211_find_sta(priv->vif, addr); 1054 if (sta) {
1017 if (sta) { 1055 memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config));
1018 memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config)); 1056 cur_ht_config = &ht_config;
1019 cur_ht_config = &ht_config;
1020 }
1021 rcu_read_unlock();
1022 } 1057 }
1058 rcu_read_unlock();
1023 1059
1060 /* Add station to device's station table */
1024 sta_id = iwl_add_station(priv, addr, is_ap, CMD_SYNC, cur_ht_config); 1061 sta_id = iwl_add_station(priv, addr, is_ap, CMD_SYNC, cur_ht_config);
1025 1062
1026 /* Set up default rate scaling table in device's station table */ 1063 /* Set up default rate scaling table in device's station table */
@@ -1085,6 +1122,7 @@ static void iwl_sta_init_bcast_lq(struct iwl_priv *priv)
1085 */ 1122 */
1086void iwl_add_bcast_station(struct iwl_priv *priv) 1123void iwl_add_bcast_station(struct iwl_priv *priv)
1087{ 1124{
1125 IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n");
1088 iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL); 1126 iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL);
1089 1127
1090 /* Set up default rate scaling table in device's station table */ 1128 /* Set up default rate scaling table in device's station table */
@@ -1093,6 +1131,16 @@ void iwl_add_bcast_station(struct iwl_priv *priv)
1093EXPORT_SYMBOL(iwl_add_bcast_station); 1131EXPORT_SYMBOL(iwl_add_bcast_station);
1094 1132
1095/** 1133/**
1134 * iwl3945_add_bcast_station - add broadcast station into station table.
1135 */
1136void iwl3945_add_bcast_station(struct iwl_priv *priv)
1137{
1138 IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n");
1139 iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL);
1140}
1141EXPORT_SYMBOL(iwl3945_add_bcast_station);
1142
1143/**
1096 * iwl_get_sta_id - Find station's index within station table 1144 * iwl_get_sta_id - Find station's index within station table
1097 * 1145 *
1098 * If new IBSS station, create new entry in station table 1146 * If new IBSS station, create new entry in station table
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 8d052de2d405..2dc35fe28f56 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -53,6 +53,7 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
53 53
54int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap); 54int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
55void iwl_add_bcast_station(struct iwl_priv *priv); 55void iwl_add_bcast_station(struct iwl_priv *priv);
56void iwl3945_add_bcast_station(struct iwl_priv *priv);
56int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap); 57int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
57void iwl_clear_stations_table(struct iwl_priv *priv); 58void iwl_clear_stations_table(struct iwl_priv *priv);
58int iwl_get_free_ucode_key_index(struct iwl_priv *priv); 59int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 8f4071562857..1ed5206721ec 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -60,7 +60,8 @@ static const u16 default_tid_to_tx_fifo[] = {
60static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv, 60static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
61 struct iwl_dma_ptr *ptr, size_t size) 61 struct iwl_dma_ptr *ptr, size_t size)
62{ 62{
63 ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma); 63 ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
64 GFP_KERNEL);
64 if (!ptr->addr) 65 if (!ptr->addr)
65 return -ENOMEM; 66 return -ENOMEM;
66 ptr->size = size; 67 ptr->size = size;
@@ -73,21 +74,20 @@ static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
73 if (unlikely(!ptr->addr)) 74 if (unlikely(!ptr->addr))
74 return; 75 return;
75 76
76 pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma); 77 dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
77 memset(ptr, 0, sizeof(*ptr)); 78 memset(ptr, 0, sizeof(*ptr));
78} 79}
79 80
80/** 81/**
81 * iwl_txq_update_write_ptr - Send new write index to hardware 82 * iwl_txq_update_write_ptr - Send new write index to hardware
82 */ 83 */
83int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq) 84void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
84{ 85{
85 u32 reg = 0; 86 u32 reg = 0;
86 int ret = 0;
87 int txq_id = txq->q.id; 87 int txq_id = txq->q.id;
88 88
89 if (txq->need_update == 0) 89 if (txq->need_update == 0)
90 return ret; 90 return;
91 91
92 /* if we're trying to save power */ 92 /* if we're trying to save power */
93 if (test_bit(STATUS_POWER_PMI, &priv->status)) { 93 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
@@ -101,7 +101,7 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
101 txq_id, reg); 101 txq_id, reg);
102 iwl_set_bit(priv, CSR_GP_CNTRL, 102 iwl_set_bit(priv, CSR_GP_CNTRL,
103 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 103 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
104 return ret; 104 return;
105 } 105 }
106 106
107 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 107 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
@@ -114,8 +114,6 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
114 txq->q.write_ptr | (txq_id << 8)); 114 txq->q.write_ptr | (txq_id << 8));
115 115
116 txq->need_update = 0; 116 txq->need_update = 0;
117
118 return ret;
119} 117}
120EXPORT_SYMBOL(iwl_txq_update_write_ptr); 118EXPORT_SYMBOL(iwl_txq_update_write_ptr);
121 119
@@ -146,7 +144,7 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
146{ 144{
147 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 145 struct iwl_tx_queue *txq = &priv->txq[txq_id];
148 struct iwl_queue *q = &txq->q; 146 struct iwl_queue *q = &txq->q;
149 struct pci_dev *dev = priv->pci_dev; 147 struct device *dev = &priv->pci_dev->dev;
150 int i; 148 int i;
151 149
152 if (q->n_bd == 0) 150 if (q->n_bd == 0)
@@ -163,8 +161,8 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
163 161
164 /* De-alloc circular buffer of TFDs */ 162 /* De-alloc circular buffer of TFDs */
165 if (txq->q.n_bd) 163 if (txq->q.n_bd)
166 pci_free_consistent(dev, priv->hw_params.tfd_size * 164 dma_free_coherent(dev, priv->hw_params.tfd_size *
167 txq->q.n_bd, txq->tfds, txq->q.dma_addr); 165 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
168 166
169 /* De-alloc array of per-TFD driver data */ 167 /* De-alloc array of per-TFD driver data */
170 kfree(txq->txb); 168 kfree(txq->txb);
@@ -193,7 +191,7 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
193{ 191{
194 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; 192 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
195 struct iwl_queue *q = &txq->q; 193 struct iwl_queue *q = &txq->q;
196 struct pci_dev *dev = priv->pci_dev; 194 struct device *dev = &priv->pci_dev->dev;
197 int i; 195 int i;
198 196
199 if (q->n_bd == 0) 197 if (q->n_bd == 0)
@@ -205,8 +203,8 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
205 203
206 /* De-alloc circular buffer of TFDs */ 204 /* De-alloc circular buffer of TFDs */
207 if (txq->q.n_bd) 205 if (txq->q.n_bd)
208 pci_free_consistent(dev, priv->hw_params.tfd_size * 206 dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
209 txq->q.n_bd, txq->tfds, txq->q.dma_addr); 207 txq->tfds, txq->q.dma_addr);
210 208
211 /* deallocate arrays */ 209 /* deallocate arrays */
212 kfree(txq->cmd); 210 kfree(txq->cmd);
@@ -297,7 +295,7 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
297static int iwl_tx_queue_alloc(struct iwl_priv *priv, 295static int iwl_tx_queue_alloc(struct iwl_priv *priv,
298 struct iwl_tx_queue *txq, u32 id) 296 struct iwl_tx_queue *txq, u32 id)
299{ 297{
300 struct pci_dev *dev = priv->pci_dev; 298 struct device *dev = &priv->pci_dev->dev;
301 size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; 299 size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
302 300
303 /* Driver private data, only for Tx (not command) queues, 301 /* Driver private data, only for Tx (not command) queues,
@@ -316,8 +314,8 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
316 314
317 /* Circular buffer of transmit frame descriptors (TFDs), 315 /* Circular buffer of transmit frame descriptors (TFDs),
318 * shared with device */ 316 * shared with device */
319 txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr); 317 txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
320 318 GFP_KERNEL);
321 if (!txq->tfds) { 319 if (!txq->tfds) {
322 IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz); 320 IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
323 goto error; 321 goto error;
@@ -366,7 +364,7 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
366 for (i = 0; i < actual_slots; i++) { 364 for (i = 0; i < actual_slots; i++) {
367 /* only happens for cmd queue */ 365 /* only happens for cmd queue */
368 if (i == slots_num) 366 if (i == slots_num)
369 len += IWL_MAX_SCAN_SIZE; 367 len = IWL_MAX_CMD_SIZE;
370 368
371 txq->cmd[i] = kmalloc(len, GFP_KERNEL); 369 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
372 if (!txq->cmd[i]) 370 if (!txq->cmd[i])
@@ -745,7 +743,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
745 u8 tid = 0; 743 u8 tid = 0;
746 u8 *qc = NULL; 744 u8 *qc = NULL;
747 unsigned long flags; 745 unsigned long flags;
748 int ret;
749 746
750 spin_lock_irqsave(&priv->lock, flags); 747 spin_lock_irqsave(&priv->lock, flags);
751 if (iwl_is_rfkill(priv)) { 748 if (iwl_is_rfkill(priv)) {
@@ -820,8 +817,10 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
820 hdr->seq_ctrl |= cpu_to_le16(seq_number); 817 hdr->seq_ctrl |= cpu_to_le16(seq_number);
821 seq_number += 0x10; 818 seq_number += 0x10;
822 /* aggregation is on for this <sta,tid> */ 819 /* aggregation is on for this <sta,tid> */
823 if (info->flags & IEEE80211_TX_CTL_AMPDU) 820 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
821 priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
824 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; 822 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
823 }
825 } 824 }
826 825
827 txq = &priv->txq[txq_id]; 826 txq = &priv->txq[txq_id];
@@ -963,7 +962,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
963 962
964 /* Tell device the write index *just past* this latest filled TFD */ 963 /* Tell device the write index *just past* this latest filled TFD */
965 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 964 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
966 ret = iwl_txq_update_write_ptr(priv, txq); 965 iwl_txq_update_write_ptr(priv, txq);
967 spin_unlock_irqrestore(&priv->lock, flags); 966 spin_unlock_irqrestore(&priv->lock, flags);
968 967
969 /* 968 /*
@@ -977,9 +976,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
977 if (sta_priv && sta_priv->client) 976 if (sta_priv && sta_priv->client)
978 atomic_inc(&sta_priv->pending_frames); 977 atomic_inc(&sta_priv->pending_frames);
979 978
980 if (ret)
981 return ret;
982
983 if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) { 979 if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
984 if (wait_write_ptr) { 980 if (wait_write_ptr) {
985 spin_lock_irqsave(&priv->lock, flags); 981 spin_lock_irqsave(&priv->lock, flags);
@@ -1018,7 +1014,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1018 struct iwl_cmd_meta *out_meta; 1014 struct iwl_cmd_meta *out_meta;
1019 dma_addr_t phys_addr; 1015 dma_addr_t phys_addr;
1020 unsigned long flags; 1016 unsigned long flags;
1021 int len, ret; 1017 int len;
1022 u32 idx; 1018 u32 idx;
1023 u16 fix_size; 1019 u16 fix_size;
1024 1020
@@ -1027,9 +1023,12 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1027 1023
1028 /* If any of the command structures end up being larger than 1024 /* If any of the command structures end up being larger than
1029 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then 1025 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
1030 * we will need to increase the size of the TFD entries */ 1026 * we will need to increase the size of the TFD entries
1027 * Also, check to see if command buffer should not exceed the size
1028 * of device_cmd and max_cmd_size. */
1031 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && 1029 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
1032 !(cmd->flags & CMD_SIZE_HUGE)); 1030 !(cmd->flags & CMD_SIZE_HUGE));
1031 BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
1033 1032
1034 if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) { 1033 if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
1035 IWL_WARN(priv, "Not sending command - %s KILL\n", 1034 IWL_WARN(priv, "Not sending command - %s KILL\n",
@@ -1073,8 +1072,8 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1073 if (cmd->flags & CMD_SIZE_HUGE) 1072 if (cmd->flags & CMD_SIZE_HUGE)
1074 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; 1073 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
1075 len = sizeof(struct iwl_device_cmd); 1074 len = sizeof(struct iwl_device_cmd);
1076 len += (idx == TFD_CMD_SLOTS) ? IWL_MAX_SCAN_SIZE : 0; 1075 if (idx == TFD_CMD_SLOTS)
1077 1076 len = IWL_MAX_CMD_SIZE;
1078 1077
1079#ifdef CONFIG_IWLWIFI_DEBUG 1078#ifdef CONFIG_IWLWIFI_DEBUG
1080 switch (out_cmd->hdr.cmd) { 1079 switch (out_cmd->hdr.cmd) {
@@ -1115,10 +1114,10 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1115 1114
1116 /* Increment and update queue's write index */ 1115 /* Increment and update queue's write index */
1117 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 1116 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1118 ret = iwl_txq_update_write_ptr(priv, txq); 1117 iwl_txq_update_write_ptr(priv, txq);
1119 1118
1120 spin_unlock_irqrestore(&priv->hcmd_lock, flags); 1119 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
1121 return ret ? ret : idx; 1120 return idx;
1122} 1121}
1123 1122
1124static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb) 1123static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
@@ -1260,6 +1259,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1260 1259
1261 if (!(meta->flags & CMD_ASYNC)) { 1260 if (!(meta->flags & CMD_ASYNC)) {
1262 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 1261 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1262 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n",
1263 get_cmd_string(cmd->hdr.cmd));
1263 wake_up_interruptible(&priv->wait_command_queue); 1264 wake_up_interruptible(&priv->wait_command_queue);
1264 } 1265 }
1265} 1266}
@@ -1346,7 +1347,7 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
1346{ 1347{
1347 int tx_fifo_id, txq_id, sta_id, ssn = -1; 1348 int tx_fifo_id, txq_id, sta_id, ssn = -1;
1348 struct iwl_tid_data *tid_data; 1349 struct iwl_tid_data *tid_data;
1349 int ret, write_ptr, read_ptr; 1350 int write_ptr, read_ptr;
1350 unsigned long flags; 1351 unsigned long flags;
1351 1352
1352 if (!ra) { 1353 if (!ra) {
@@ -1398,13 +1399,17 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
1398 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; 1399 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1399 1400
1400 spin_lock_irqsave(&priv->lock, flags); 1401 spin_lock_irqsave(&priv->lock, flags);
1401 ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn, 1402 /*
1403 * the only reason this call can fail is queue number out of range,
1404 * which can happen if uCode is reloaded and all the station
1405 * information are lost. if it is outside the range, there is no need
1406 * to deactivate the uCode queue, just return "success" to allow
1407 * mac80211 to clean up it own data.
1408 */
1409 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
1402 tx_fifo_id); 1410 tx_fifo_id);
1403 spin_unlock_irqrestore(&priv->lock, flags); 1411 spin_unlock_irqrestore(&priv->lock, flags);
1404 1412
1405 if (ret)
1406 return ret;
1407
1408 ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid); 1413 ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
1409 1414
1410 return 0; 1415 return 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index f8e4e4b18d02..54daa38ecba3 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -53,9 +53,10 @@
53#include "iwl-commands.h" 53#include "iwl-commands.h"
54#include "iwl-sta.h" 54#include "iwl-sta.h"
55#include "iwl-3945.h" 55#include "iwl-3945.h"
56#include "iwl-helpers.h"
57#include "iwl-core.h" 56#include "iwl-core.h"
57#include "iwl-helpers.h"
58#include "iwl-dev.h" 58#include "iwl-dev.h"
59#include "iwl-spectrum.h"
59 60
60/* 61/*
61 * module name, copyright, version, etc. 62 * module name, copyright, version, etc.
@@ -70,14 +71,13 @@
70#define VD 71#define VD
71#endif 72#endif
72 73
73#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT 74/*
74#define VS "s" 75 * add "s" to indicate spectrum measurement included.
75#else 76 * we add it here to be consistent with previous releases in which
76#define VS 77 * this was configurable.
77#endif 78 */
78 79#define DRV_VERSION IWLWIFI_VERSION VD "s"
79#define DRV_VERSION IWLWIFI_VERSION VD VS 80#define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation"
80#define DRV_COPYRIGHT "Copyright(c) 2003-2009 Intel Corporation"
81#define DRV_AUTHOR "<ilw@linux.intel.com>" 81#define DRV_AUTHOR "<ilw@linux.intel.com>"
82 82
83MODULE_DESCRIPTION(DRV_DESCRIPTION); 83MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -352,10 +352,10 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
352static void iwl3945_unset_hw_params(struct iwl_priv *priv) 352static void iwl3945_unset_hw_params(struct iwl_priv *priv)
353{ 353{
354 if (priv->shared_virt) 354 if (priv->shared_virt)
355 pci_free_consistent(priv->pci_dev, 355 dma_free_coherent(&priv->pci_dev->dev,
356 sizeof(struct iwl3945_shared), 356 sizeof(struct iwl3945_shared),
357 priv->shared_virt, 357 priv->shared_virt,
358 priv->shared_phys); 358 priv->shared_phys);
359} 359}
360 360
361static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv, 361static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
@@ -478,7 +478,6 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
478 u8 wait_write_ptr = 0; 478 u8 wait_write_ptr = 0;
479 u8 *qc = NULL; 479 u8 *qc = NULL;
480 unsigned long flags; 480 unsigned long flags;
481 int rc;
482 481
483 spin_lock_irqsave(&priv->lock, flags); 482 spin_lock_irqsave(&priv->lock, flags);
484 if (iwl_is_rfkill(priv)) { 483 if (iwl_is_rfkill(priv)) {
@@ -663,12 +662,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
663 662
664 /* Tell device the write index *just past* this latest filled TFD */ 663 /* Tell device the write index *just past* this latest filled TFD */
665 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 664 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
666 rc = iwl_txq_update_write_ptr(priv, txq); 665 iwl_txq_update_write_ptr(priv, txq);
667 spin_unlock_irqrestore(&priv->lock, flags); 666 spin_unlock_irqrestore(&priv->lock, flags);
668 667
669 if (rc)
670 return rc;
671
672 if ((iwl_queue_space(q) < q->high_mark) 668 if ((iwl_queue_space(q) < q->high_mark)
673 && priv->mac80211_registered) { 669 && priv->mac80211_registered) {
674 if (wait_write_ptr) { 670 if (wait_write_ptr) {
@@ -689,10 +685,6 @@ drop:
689 return -1; 685 return -1;
690} 686}
691 687
692#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
693
694#include "iwl-spectrum.h"
695
696#define BEACON_TIME_MASK_LOW 0x00FFFFFF 688#define BEACON_TIME_MASK_LOW 0x00FFFFFF
697#define BEACON_TIME_MASK_HIGH 0xFF000000 689#define BEACON_TIME_MASK_HIGH 0xFF000000
698#define TIME_UNIT 1024 690#define TIME_UNIT 1024
@@ -819,7 +811,6 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
819 811
820 return rc; 812 return rc;
821} 813}
822#endif
823 814
824static void iwl3945_rx_reply_alive(struct iwl_priv *priv, 815static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
825 struct iwl_rx_mem_buffer *rxb) 816 struct iwl_rx_mem_buffer *rxb)
@@ -962,6 +953,8 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
962 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta; 953 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
963 priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error; 954 priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
964 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; 955 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
956 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
957 iwl_rx_spectrum_measure_notif;
965 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; 958 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
966 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = 959 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
967 iwl_rx_pm_debug_statistics_notif; 960 iwl_rx_pm_debug_statistics_notif;
@@ -975,7 +968,6 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
975 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_hw_rx_statistics; 968 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_hw_rx_statistics;
976 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics; 969 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
977 970
978 iwl_setup_spectrum_handlers(priv);
979 iwl_setup_rx_scan_handlers(priv); 971 iwl_setup_rx_scan_handlers(priv);
980 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif; 972 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
981 973
@@ -1067,13 +1059,13 @@ static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
1067 * also updates the memory address in the firmware to reference the new 1059 * also updates the memory address in the firmware to reference the new
1068 * target buffer. 1060 * target buffer.
1069 */ 1061 */
1070static int iwl3945_rx_queue_restock(struct iwl_priv *priv) 1062static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
1071{ 1063{
1072 struct iwl_rx_queue *rxq = &priv->rxq; 1064 struct iwl_rx_queue *rxq = &priv->rxq;
1073 struct list_head *element; 1065 struct list_head *element;
1074 struct iwl_rx_mem_buffer *rxb; 1066 struct iwl_rx_mem_buffer *rxb;
1075 unsigned long flags; 1067 unsigned long flags;
1076 int write, rc; 1068 int write;
1077 1069
1078 spin_lock_irqsave(&rxq->lock, flags); 1070 spin_lock_irqsave(&rxq->lock, flags);
1079 write = rxq->write & ~0x7; 1071 write = rxq->write & ~0x7;
@@ -1103,12 +1095,8 @@ static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
1103 spin_lock_irqsave(&rxq->lock, flags); 1095 spin_lock_irqsave(&rxq->lock, flags);
1104 rxq->need_update = 1; 1096 rxq->need_update = 1;
1105 spin_unlock_irqrestore(&rxq->lock, flags); 1097 spin_unlock_irqrestore(&rxq->lock, flags);
1106 rc = iwl_rx_queue_update_write_ptr(priv, rxq); 1098 iwl_rx_queue_update_write_ptr(priv, rxq);
1107 if (rc)
1108 return rc;
1109 } 1099 }
1110
1111 return 0;
1112} 1100}
1113 1101
1114/** 1102/**
@@ -1253,10 +1241,10 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
1253 } 1241 }
1254 } 1242 }
1255 1243
1256 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd, 1244 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1257 rxq->dma_addr); 1245 rxq->dma_addr);
1258 pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status), 1246 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
1259 rxq->rb_stts, rxq->rb_stts_dma); 1247 rxq->rb_stts, rxq->rb_stts_dma);
1260 rxq->bd = NULL; 1248 rxq->bd = NULL;
1261 rxq->rb_stts = NULL; 1249 rxq->rb_stts = NULL;
1262} 1250}
@@ -1518,8 +1506,9 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1518 * iwl3945_print_event_log - Dump error event log to syslog 1506 * iwl3945_print_event_log - Dump error event log to syslog
1519 * 1507 *
1520 */ 1508 */
1521static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx, 1509static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1522 u32 num_events, u32 mode) 1510 u32 num_events, u32 mode,
1511 int pos, char **buf, size_t bufsz)
1523{ 1512{
1524 u32 i; 1513 u32 i;
1525 u32 base; /* SRAM byte address of event log header */ 1514 u32 base; /* SRAM byte address of event log header */
@@ -1529,7 +1518,7 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1529 unsigned long reg_flags; 1518 unsigned long reg_flags;
1530 1519
1531 if (num_events == 0) 1520 if (num_events == 0)
1532 return; 1521 return pos;
1533 1522
1534 base = le32_to_cpu(priv->card_alive.log_event_table_ptr); 1523 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1535 1524
@@ -1555,26 +1544,43 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1555 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1544 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1556 if (mode == 0) { 1545 if (mode == 0) {
1557 /* data, ev */ 1546 /* data, ev */
1558 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev); 1547 if (bufsz) {
1559 trace_iwlwifi_dev_ucode_event(priv, 0, time, ev); 1548 pos += scnprintf(*buf + pos, bufsz - pos,
1549 "0x%08x:%04u\n",
1550 time, ev);
1551 } else {
1552 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
1553 trace_iwlwifi_dev_ucode_event(priv, 0,
1554 time, ev);
1555 }
1560 } else { 1556 } else {
1561 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1557 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1562 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", time, data, ev); 1558 if (bufsz) {
1563 trace_iwlwifi_dev_ucode_event(priv, time, data, ev); 1559 pos += scnprintf(*buf + pos, bufsz - pos,
1560 "%010u:0x%08x:%04u\n",
1561 time, data, ev);
1562 } else {
1563 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n",
1564 time, data, ev);
1565 trace_iwlwifi_dev_ucode_event(priv, time,
1566 data, ev);
1567 }
1564 } 1568 }
1565 } 1569 }
1566 1570
1567 /* Allow device to power down */ 1571 /* Allow device to power down */
1568 iwl_release_nic_access(priv); 1572 iwl_release_nic_access(priv);
1569 spin_unlock_irqrestore(&priv->reg_lock, reg_flags); 1573 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
1574 return pos;
1570} 1575}
1571 1576
1572/** 1577/**
1573 * iwl3945_print_last_event_logs - Dump the newest # of event log to syslog 1578 * iwl3945_print_last_event_logs - Dump the newest # of event log to syslog
1574 */ 1579 */
1575static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity, 1580static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1576 u32 num_wraps, u32 next_entry, 1581 u32 num_wraps, u32 next_entry,
1577 u32 size, u32 mode) 1582 u32 size, u32 mode,
1583 int pos, char **buf, size_t bufsz)
1578{ 1584{
1579 /* 1585 /*
1580 * display the newest DEFAULT_LOG_ENTRIES entries 1586 * display the newest DEFAULT_LOG_ENTRIES entries
@@ -1582,21 +1588,28 @@ static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1582 */ 1588 */
1583 if (num_wraps) { 1589 if (num_wraps) {
1584 if (next_entry < size) { 1590 if (next_entry < size) {
1585 iwl3945_print_event_log(priv, 1591 pos = iwl3945_print_event_log(priv,
1586 capacity - (size - next_entry), 1592 capacity - (size - next_entry),
1587 size - next_entry, mode); 1593 size - next_entry, mode,
1588 iwl3945_print_event_log(priv, 0, 1594 pos, buf, bufsz);
1589 next_entry, mode); 1595 pos = iwl3945_print_event_log(priv, 0,
1596 next_entry, mode,
1597 pos, buf, bufsz);
1590 } else 1598 } else
1591 iwl3945_print_event_log(priv, next_entry - size, 1599 pos = iwl3945_print_event_log(priv, next_entry - size,
1592 size, mode); 1600 size, mode,
1601 pos, buf, bufsz);
1593 } else { 1602 } else {
1594 if (next_entry < size) 1603 if (next_entry < size)
1595 iwl3945_print_event_log(priv, 0, next_entry, mode); 1604 pos = iwl3945_print_event_log(priv, 0,
1605 next_entry, mode,
1606 pos, buf, bufsz);
1596 else 1607 else
1597 iwl3945_print_event_log(priv, next_entry - size, 1608 pos = iwl3945_print_event_log(priv, next_entry - size,
1598 size, mode); 1609 size, mode,
1610 pos, buf, bufsz);
1599 } 1611 }
1612 return pos;
1600} 1613}
1601 1614
1602/* For sanity check only. Actual size is determined by uCode, typ. 512 */ 1615/* For sanity check only. Actual size is determined by uCode, typ. 512 */
@@ -1604,7 +1617,8 @@ static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1604 1617
1605#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20) 1618#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
1606 1619
1607void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log) 1620int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1621 char **buf, bool display)
1608{ 1622{
1609 u32 base; /* SRAM byte address of event log header */ 1623 u32 base; /* SRAM byte address of event log header */
1610 u32 capacity; /* event log capacity in # entries */ 1624 u32 capacity; /* event log capacity in # entries */
@@ -1612,11 +1626,13 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1612 u32 num_wraps; /* # times uCode wrapped to top of log */ 1626 u32 num_wraps; /* # times uCode wrapped to top of log */
1613 u32 next_entry; /* index of next entry to be written by uCode */ 1627 u32 next_entry; /* index of next entry to be written by uCode */
1614 u32 size; /* # entries that we'll print */ 1628 u32 size; /* # entries that we'll print */
1629 int pos = 0;
1630 size_t bufsz = 0;
1615 1631
1616 base = le32_to_cpu(priv->card_alive.log_event_table_ptr); 1632 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1617 if (!iwl3945_hw_valid_rtc_data_addr(base)) { 1633 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
1618 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base); 1634 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
1619 return; 1635 return -EINVAL;
1620 } 1636 }
1621 1637
1622 /* event log header */ 1638 /* event log header */
@@ -1642,7 +1658,7 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1642 /* bail out if nothing in log */ 1658 /* bail out if nothing in log */
1643 if (size == 0) { 1659 if (size == 0) {
1644 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n"); 1660 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1645 return; 1661 return pos;
1646 } 1662 }
1647 1663
1648#ifdef CONFIG_IWLWIFI_DEBUG 1664#ifdef CONFIG_IWLWIFI_DEBUG
@@ -1658,25 +1674,38 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1658 size); 1674 size);
1659 1675
1660#ifdef CONFIG_IWLWIFI_DEBUG 1676#ifdef CONFIG_IWLWIFI_DEBUG
1677 if (display) {
1678 if (full_log)
1679 bufsz = capacity * 48;
1680 else
1681 bufsz = size * 48;
1682 *buf = kmalloc(bufsz, GFP_KERNEL);
1683 if (!*buf)
1684 return -ENOMEM;
1685 }
1661 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) { 1686 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
1662 /* if uCode has wrapped back to top of log, 1687 /* if uCode has wrapped back to top of log,
1663 * start at the oldest entry, 1688 * start at the oldest entry,
1664 * i.e the next one that uCode would fill. 1689 * i.e the next one that uCode would fill.
1665 */ 1690 */
1666 if (num_wraps) 1691 if (num_wraps)
1667 iwl3945_print_event_log(priv, next_entry, 1692 pos = iwl3945_print_event_log(priv, next_entry,
1668 capacity - next_entry, mode); 1693 capacity - next_entry, mode,
1694 pos, buf, bufsz);
1669 1695
1670 /* (then/else) start at top of log */ 1696 /* (then/else) start at top of log */
1671 iwl3945_print_event_log(priv, 0, next_entry, mode); 1697 pos = iwl3945_print_event_log(priv, 0, next_entry, mode,
1698 pos, buf, bufsz);
1672 } else 1699 } else
1673 iwl3945_print_last_event_logs(priv, capacity, num_wraps, 1700 pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
1674 next_entry, size, mode); 1701 next_entry, size, mode,
1702 pos, buf, bufsz);
1675#else 1703#else
1676 iwl3945_print_last_event_logs(priv, capacity, num_wraps, 1704 pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
1677 next_entry, size, mode); 1705 next_entry, size, mode,
1706 pos, buf, bufsz);
1678#endif 1707#endif
1679 1708 return pos;
1680} 1709}
1681 1710
1682static void iwl3945_irq_tasklet(struct iwl_priv *priv) 1711static void iwl3945_irq_tasklet(struct iwl_priv *priv)
@@ -2996,18 +3025,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
2996 mutex_unlock(&priv->mutex); 3025 mutex_unlock(&priv->mutex);
2997} 3026}
2998 3027
2999static void iwl3945_bg_up(struct work_struct *data)
3000{
3001 struct iwl_priv *priv = container_of(data, struct iwl_priv, up);
3002
3003 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3004 return;
3005
3006 mutex_lock(&priv->mutex);
3007 __iwl3945_up(priv);
3008 mutex_unlock(&priv->mutex);
3009}
3010
3011static void iwl3945_bg_restart(struct work_struct *data) 3028static void iwl3945_bg_restart(struct work_struct *data)
3012{ 3029{
3013 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); 3030 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
@@ -3024,7 +3041,13 @@ static void iwl3945_bg_restart(struct work_struct *data)
3024 ieee80211_restart_hw(priv->hw); 3041 ieee80211_restart_hw(priv->hw);
3025 } else { 3042 } else {
3026 iwl3945_down(priv); 3043 iwl3945_down(priv);
3027 queue_work(priv->workqueue, &priv->up); 3044
3045 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3046 return;
3047
3048 mutex_lock(&priv->mutex);
3049 __iwl3945_up(priv);
3050 mutex_unlock(&priv->mutex);
3028 } 3051 }
3029} 3052}
3030 3053
@@ -3528,8 +3551,6 @@ static ssize_t store_filter_flags(struct device *d,
3528static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, 3551static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
3529 store_filter_flags); 3552 store_filter_flags);
3530 3553
3531#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
3532
3533static ssize_t show_measurement(struct device *d, 3554static ssize_t show_measurement(struct device *d,
3534 struct device_attribute *attr, char *buf) 3555 struct device_attribute *attr, char *buf)
3535{ 3556{
@@ -3599,7 +3620,6 @@ static ssize_t store_measurement(struct device *d,
3599 3620
3600static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, 3621static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
3601 show_measurement, store_measurement); 3622 show_measurement, store_measurement);
3602#endif /* CONFIG_IWL3945_SPECTRUM_MEASUREMENT */
3603 3623
3604static ssize_t store_retry_rate(struct device *d, 3624static ssize_t store_retry_rate(struct device *d,
3605 struct device_attribute *attr, 3625 struct device_attribute *attr,
@@ -3748,7 +3768,6 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
3748 3768
3749 init_waitqueue_head(&priv->wait_command_queue); 3769 init_waitqueue_head(&priv->wait_command_queue);
3750 3770
3751 INIT_WORK(&priv->up, iwl3945_bg_up);
3752 INIT_WORK(&priv->restart, iwl3945_bg_restart); 3771 INIT_WORK(&priv->restart, iwl3945_bg_restart);
3753 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish); 3772 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
3754 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update); 3773 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
@@ -3782,9 +3801,7 @@ static struct attribute *iwl3945_sysfs_entries[] = {
3782 &dev_attr_dump_errors.attr, 3801 &dev_attr_dump_errors.attr,
3783 &dev_attr_flags.attr, 3802 &dev_attr_flags.attr,
3784 &dev_attr_filter_flags.attr, 3803 &dev_attr_filter_flags.attr,
3785#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
3786 &dev_attr_measurement.attr, 3804 &dev_attr_measurement.attr,
3787#endif
3788 &dev_attr_retry_rate.attr, 3805 &dev_attr_retry_rate.attr,
3789 &dev_attr_statistics.attr, 3806 &dev_attr_statistics.attr,
3790 &dev_attr_status.attr, 3807 &dev_attr_status.attr,
@@ -3810,7 +3827,6 @@ static struct ieee80211_ops iwl3945_hw_ops = {
3810 .config = iwl_mac_config, 3827 .config = iwl_mac_config,
3811 .configure_filter = iwl_configure_filter, 3828 .configure_filter = iwl_configure_filter,
3812 .set_key = iwl3945_mac_set_key, 3829 .set_key = iwl3945_mac_set_key,
3813 .get_tx_stats = iwl_mac_get_tx_stats,
3814 .conf_tx = iwl_mac_conf_tx, 3830 .conf_tx = iwl_mac_conf_tx,
3815 .reset_tsf = iwl_mac_reset_tsf, 3831 .reset_tsf = iwl_mac_reset_tsf,
3816 .bss_info_changed = iwl_bss_info_changed, 3832 .bss_info_changed = iwl_bss_info_changed,
@@ -3831,6 +3847,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3831 INIT_LIST_HEAD(&priv->free_frames); 3847 INIT_LIST_HEAD(&priv->free_frames);
3832 3848
3833 mutex_init(&priv->mutex); 3849 mutex_init(&priv->mutex);
3850 mutex_init(&priv->sync_cmd_mutex);
3834 3851
3835 /* Clear the driver's (not device's) station table */ 3852 /* Clear the driver's (not device's) station table */
3836 iwl_clear_stations_table(priv); 3853 iwl_clear_stations_table(priv);
@@ -3840,6 +3857,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3840 priv->band = IEEE80211_BAND_2GHZ; 3857 priv->band = IEEE80211_BAND_2GHZ;
3841 3858
3842 priv->iw_mode = NL80211_IFTYPE_STATION; 3859 priv->iw_mode = NL80211_IFTYPE_STATION;
3860 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
3843 3861
3844 iwl_reset_qos(priv); 3862 iwl_reset_qos(priv);
3845 3863
@@ -4022,6 +4040,13 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4022 spin_lock_init(&priv->reg_lock); 4040 spin_lock_init(&priv->reg_lock);
4023 spin_lock_init(&priv->lock); 4041 spin_lock_init(&priv->lock);
4024 4042
4043 /*
4044 * stop and reset the on-board processor just in case it is in a
4045 * strange state ... like being left stranded by a primary kernel
4046 * and this is now the kdump kernel trying to start up
4047 */
4048 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
4049
4025 /*********************** 4050 /***********************
4026 * 4. Read EEPROM 4051 * 4. Read EEPROM
4027 * ********************/ 4052 * ********************/
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 842811142bef..79ffa3b98d73 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -268,7 +268,7 @@ struct iwm_priv {
268 268
269 struct sk_buff_head rx_list; 269 struct sk_buff_head rx_list;
270 struct list_head rx_tickets; 270 struct list_head rx_tickets;
271 struct list_head rx_packets[IWM_RX_ID_HASH + 1]; 271 struct list_head rx_packets[IWM_RX_ID_HASH];
272 struct workqueue_struct *rx_wq; 272 struct workqueue_struct *rx_wq;
273 struct work_struct rx_worker; 273 struct work_struct rx_worker;
274 274
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index f727b4a83196..ad8f7eabb5aa 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -868,36 +868,35 @@ static int iwm_mlme_mgt_frame(struct iwm_priv *iwm, u8 *buf,
868 struct iwm_umac_notif_mgt_frame *mgt_frame = 868 struct iwm_umac_notif_mgt_frame *mgt_frame =
869 (struct iwm_umac_notif_mgt_frame *)buf; 869 (struct iwm_umac_notif_mgt_frame *)buf;
870 struct ieee80211_mgmt *mgt = (struct ieee80211_mgmt *)mgt_frame->frame; 870 struct ieee80211_mgmt *mgt = (struct ieee80211_mgmt *)mgt_frame->frame;
871 u8 *ie;
872 871
873 IWM_HEXDUMP(iwm, DBG, MLME, "MGT: ", mgt_frame->frame, 872 IWM_HEXDUMP(iwm, DBG, MLME, "MGT: ", mgt_frame->frame,
874 le16_to_cpu(mgt_frame->len)); 873 le16_to_cpu(mgt_frame->len));
875 874
876 if (ieee80211_is_assoc_req(mgt->frame_control)) { 875 if (ieee80211_is_assoc_req(mgt->frame_control)) {
877 ie = mgt->u.assoc_req.variable;; 876 iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
878 iwm->req_ie_len = 877 - offsetof(struct ieee80211_mgmt,
879 le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt); 878 u.assoc_req.variable);
880 kfree(iwm->req_ie); 879 kfree(iwm->req_ie);
881 iwm->req_ie = kmemdup(mgt->u.assoc_req.variable, 880 iwm->req_ie = kmemdup(mgt->u.assoc_req.variable,
882 iwm->req_ie_len, GFP_KERNEL); 881 iwm->req_ie_len, GFP_KERNEL);
883 } else if (ieee80211_is_reassoc_req(mgt->frame_control)) { 882 } else if (ieee80211_is_reassoc_req(mgt->frame_control)) {
884 ie = mgt->u.reassoc_req.variable;; 883 iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
885 iwm->req_ie_len = 884 - offsetof(struct ieee80211_mgmt,
886 le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt); 885 u.reassoc_req.variable);
887 kfree(iwm->req_ie); 886 kfree(iwm->req_ie);
888 iwm->req_ie = kmemdup(mgt->u.reassoc_req.variable, 887 iwm->req_ie = kmemdup(mgt->u.reassoc_req.variable,
889 iwm->req_ie_len, GFP_KERNEL); 888 iwm->req_ie_len, GFP_KERNEL);
890 } else if (ieee80211_is_assoc_resp(mgt->frame_control)) { 889 } else if (ieee80211_is_assoc_resp(mgt->frame_control)) {
891 ie = mgt->u.assoc_resp.variable;; 890 iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
892 iwm->resp_ie_len = 891 - offsetof(struct ieee80211_mgmt,
893 le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt); 892 u.assoc_resp.variable);
894 kfree(iwm->resp_ie); 893 kfree(iwm->resp_ie);
895 iwm->resp_ie = kmemdup(mgt->u.assoc_resp.variable, 894 iwm->resp_ie = kmemdup(mgt->u.assoc_resp.variable,
896 iwm->resp_ie_len, GFP_KERNEL); 895 iwm->resp_ie_len, GFP_KERNEL);
897 } else if (ieee80211_is_reassoc_resp(mgt->frame_control)) { 896 } else if (ieee80211_is_reassoc_resp(mgt->frame_control)) {
898 ie = mgt->u.reassoc_resp.variable;; 897 iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
899 iwm->resp_ie_len = 898 - offsetof(struct ieee80211_mgmt,
900 le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt); 899 u.reassoc_resp.variable);
901 kfree(iwm->resp_ie); 900 kfree(iwm->resp_ie);
902 iwm->resp_ie = kmemdup(mgt->u.reassoc_resp.variable, 901 iwm->resp_ie = kmemdup(mgt->u.reassoc_resp.variable,
903 iwm->resp_ie_len, GFP_KERNEL); 902 iwm->resp_ie_len, GFP_KERNEL);
@@ -1534,6 +1533,33 @@ static void classify8023(struct sk_buff *skb)
1534 } 1533 }
1535} 1534}
1536 1535
1536static void iwm_rx_process_amsdu(struct iwm_priv *iwm, struct sk_buff *skb)
1537{
1538 struct wireless_dev *wdev = iwm_to_wdev(iwm);
1539 struct net_device *ndev = iwm_to_ndev(iwm);
1540 struct sk_buff_head list;
1541 struct sk_buff *frame;
1542
1543 IWM_HEXDUMP(iwm, DBG, RX, "A-MSDU: ", skb->data, skb->len);
1544
1545 __skb_queue_head_init(&list);
1546 ieee80211_amsdu_to_8023s(skb, &list, ndev->dev_addr, wdev->iftype, 0);
1547
1548 while ((frame = __skb_dequeue(&list))) {
1549 ndev->stats.rx_packets++;
1550 ndev->stats.rx_bytes += frame->len;
1551
1552 frame->protocol = eth_type_trans(frame, ndev);
1553 frame->ip_summed = CHECKSUM_NONE;
1554 memset(frame->cb, 0, sizeof(frame->cb));
1555
1556 if (netif_rx_ni(frame) == NET_RX_DROP) {
1557 IWM_ERR(iwm, "Packet dropped\n");
1558 ndev->stats.rx_dropped++;
1559 }
1560 }
1561}
1562
1537static void iwm_rx_process_packet(struct iwm_priv *iwm, 1563static void iwm_rx_process_packet(struct iwm_priv *iwm,
1538 struct iwm_rx_packet *packet, 1564 struct iwm_rx_packet *packet,
1539 struct iwm_rx_ticket_node *ticket_node) 1565 struct iwm_rx_ticket_node *ticket_node)
@@ -1548,25 +1574,34 @@ static void iwm_rx_process_packet(struct iwm_priv *iwm,
1548 switch (le16_to_cpu(ticket_node->ticket->action)) { 1574 switch (le16_to_cpu(ticket_node->ticket->action)) {
1549 case IWM_RX_TICKET_RELEASE: 1575 case IWM_RX_TICKET_RELEASE:
1550 IWM_DBG_RX(iwm, DBG, "RELEASE packet\n"); 1576 IWM_DBG_RX(iwm, DBG, "RELEASE packet\n");
1551 classify8023(skb); 1577
1552 iwm_rx_adjust_packet(iwm, packet, ticket_node); 1578 iwm_rx_adjust_packet(iwm, packet, ticket_node);
1579 skb->dev = iwm_to_ndev(iwm);
1580 classify8023(skb);
1581
1582 if (le16_to_cpu(ticket_node->ticket->flags) &
1583 IWM_RX_TICKET_AMSDU_MSK) {
1584 iwm_rx_process_amsdu(iwm, skb);
1585 break;
1586 }
1587
1553 ret = ieee80211_data_to_8023(skb, ndev->dev_addr, wdev->iftype); 1588 ret = ieee80211_data_to_8023(skb, ndev->dev_addr, wdev->iftype);
1554 if (ret < 0) { 1589 if (ret < 0) {
1555 IWM_DBG_RX(iwm, DBG, "Couldn't convert 802.11 header - " 1590 IWM_DBG_RX(iwm, DBG, "Couldn't convert 802.11 header - "
1556 "%d\n", ret); 1591 "%d\n", ret);
1592 kfree_skb(packet->skb);
1557 break; 1593 break;
1558 } 1594 }
1559 1595
1560 IWM_HEXDUMP(iwm, DBG, RX, "802.3: ", skb->data, skb->len); 1596 IWM_HEXDUMP(iwm, DBG, RX, "802.3: ", skb->data, skb->len);
1561 1597
1562 skb->dev = iwm_to_ndev(iwm); 1598 ndev->stats.rx_packets++;
1599 ndev->stats.rx_bytes += skb->len;
1600
1563 skb->protocol = eth_type_trans(skb, ndev); 1601 skb->protocol = eth_type_trans(skb, ndev);
1564 skb->ip_summed = CHECKSUM_NONE; 1602 skb->ip_summed = CHECKSUM_NONE;
1565 memset(skb->cb, 0, sizeof(skb->cb)); 1603 memset(skb->cb, 0, sizeof(skb->cb));
1566 1604
1567 ndev->stats.rx_packets++;
1568 ndev->stats.rx_bytes += skb->len;
1569
1570 if (netif_rx_ni(skb) == NET_RX_DROP) { 1605 if (netif_rx_ni(skb) == NET_RX_DROP) {
1571 IWM_ERR(iwm, "Packet dropped\n"); 1606 IWM_ERR(iwm, "Packet dropped\n");
1572 ndev->stats.rx_dropped++; 1607 ndev->stats.rx_dropped++;
diff --git a/drivers/net/wireless/libertas/Kconfig b/drivers/net/wireless/libertas/Kconfig
index 30aa9d48d67e..0485c9957575 100644
--- a/drivers/net/wireless/libertas/Kconfig
+++ b/drivers/net/wireless/libertas/Kconfig
@@ -37,3 +37,9 @@ config LIBERTAS_DEBUG
37 depends on LIBERTAS 37 depends on LIBERTAS
38 ---help--- 38 ---help---
39 Debugging support. 39 Debugging support.
40
41config LIBERTAS_MESH
42 bool "Enable mesh support"
43 depends on LIBERTAS
44 help
45 This enables Libertas' MESH support, used by e.g. the OLPC people.
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index b188cd97a053..45e870e33117 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -5,11 +5,11 @@ libertas-y += cmdresp.o
5libertas-y += debugfs.o 5libertas-y += debugfs.o
6libertas-y += ethtool.o 6libertas-y += ethtool.o
7libertas-y += main.o 7libertas-y += main.o
8libertas-y += mesh.o
9libertas-y += rx.o 8libertas-y += rx.o
10libertas-y += scan.o 9libertas-y += scan.o
11libertas-y += tx.o 10libertas-y += tx.o
12libertas-y += wext.o 11libertas-y += wext.o
12libertas-$(CONFIG_LIBERTAS_MESH) += mesh.o
13 13
14usb8xxx-objs += if_usb.o 14usb8xxx-objs += if_usb.o
15libertas_cs-objs += if_cs.o 15libertas_cs-objs += if_cs.o
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index 751067369ba8..f03d5e4e59c3 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -390,10 +390,8 @@ int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
390 cmd.enablehwauto = cpu_to_le16(priv->enablehwauto); 390 cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
391 cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto); 391 cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
392 ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd); 392 ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
393 if (!ret && cmd_action == CMD_ACT_GET) { 393 if (!ret && cmd_action == CMD_ACT_GET)
394 priv->ratebitmap = le16_to_cpu(cmd.bitmap);
395 priv->enablehwauto = le16_to_cpu(cmd.enablehwauto); 394 priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
396 }
397 395
398 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); 396 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
399 return ret; 397 return ret;
@@ -807,8 +805,7 @@ static int lbs_try_associate(struct lbs_private *priv,
807 } 805 }
808 806
809 /* Use short preamble only when both the BSS and firmware support it */ 807 /* Use short preamble only when both the BSS and firmware support it */
810 if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) && 808 if (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
811 (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
812 preamble = RADIO_PREAMBLE_SHORT; 809 preamble = RADIO_PREAMBLE_SHORT;
813 810
814 ret = lbs_set_radio(priv, preamble, 1); 811 ret = lbs_set_radio(priv, preamble, 1);
@@ -939,8 +936,7 @@ static int lbs_adhoc_join(struct lbs_private *priv,
939 } 936 }
940 937
941 /* Use short preamble only when both the BSS and firmware support it */ 938 /* Use short preamble only when both the BSS and firmware support it */
942 if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) && 939 if (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
943 (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)) {
944 lbs_deb_join("AdhocJoin: Short preamble\n"); 940 lbs_deb_join("AdhocJoin: Short preamble\n");
945 preamble = RADIO_PREAMBLE_SHORT; 941 preamble = RADIO_PREAMBLE_SHORT;
946 } 942 }
@@ -1049,7 +1045,7 @@ static int lbs_adhoc_start(struct lbs_private *priv,
1049 struct assoc_request *assoc_req) 1045 struct assoc_request *assoc_req)
1050{ 1046{
1051 struct cmd_ds_802_11_ad_hoc_start cmd; 1047 struct cmd_ds_802_11_ad_hoc_start cmd;
1052 u8 preamble = RADIO_PREAMBLE_LONG; 1048 u8 preamble = RADIO_PREAMBLE_SHORT;
1053 size_t ratesize = 0; 1049 size_t ratesize = 0;
1054 u16 tmpcap = 0; 1050 u16 tmpcap = 0;
1055 int ret = 0; 1051 int ret = 0;
@@ -1057,11 +1053,6 @@ static int lbs_adhoc_start(struct lbs_private *priv,
1057 1053
1058 lbs_deb_enter(LBS_DEB_ASSOC); 1054 lbs_deb_enter(LBS_DEB_ASSOC);
1059 1055
1060 if (priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
1061 lbs_deb_join("ADHOC_START: Will use short preamble\n");
1062 preamble = RADIO_PREAMBLE_SHORT;
1063 }
1064
1065 ret = lbs_set_radio(priv, preamble, 1); 1056 ret = lbs_set_radio(priv, preamble, 1);
1066 if (ret) 1057 if (ret)
1067 goto out; 1058 goto out;
@@ -1169,11 +1160,11 @@ int lbs_adhoc_stop(struct lbs_private *priv)
1169static inline int match_bss_no_security(struct lbs_802_11_security *secinfo, 1160static inline int match_bss_no_security(struct lbs_802_11_security *secinfo,
1170 struct bss_descriptor *match_bss) 1161 struct bss_descriptor *match_bss)
1171{ 1162{
1172 if (!secinfo->wep_enabled && !secinfo->WPAenabled 1163 if (!secinfo->wep_enabled &&
1173 && !secinfo->WPA2enabled 1164 !secinfo->WPAenabled && !secinfo->WPA2enabled &&
1174 && match_bss->wpa_ie[0] != WLAN_EID_GENERIC 1165 match_bss->wpa_ie[0] != WLAN_EID_GENERIC &&
1175 && match_bss->rsn_ie[0] != WLAN_EID_RSN 1166 match_bss->rsn_ie[0] != WLAN_EID_RSN &&
1176 && !(match_bss->capability & WLAN_CAPABILITY_PRIVACY)) 1167 !(match_bss->capability & WLAN_CAPABILITY_PRIVACY))
1177 return 1; 1168 return 1;
1178 else 1169 else
1179 return 0; 1170 return 0;
@@ -1182,9 +1173,9 @@ static inline int match_bss_no_security(struct lbs_802_11_security *secinfo,
1182static inline int match_bss_static_wep(struct lbs_802_11_security *secinfo, 1173static inline int match_bss_static_wep(struct lbs_802_11_security *secinfo,
1183 struct bss_descriptor *match_bss) 1174 struct bss_descriptor *match_bss)
1184{ 1175{
1185 if (secinfo->wep_enabled && !secinfo->WPAenabled 1176 if (secinfo->wep_enabled &&
1186 && !secinfo->WPA2enabled 1177 !secinfo->WPAenabled && !secinfo->WPA2enabled &&
1187 && (match_bss->capability & WLAN_CAPABILITY_PRIVACY)) 1178 (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
1188 return 1; 1179 return 1;
1189 else 1180 else
1190 return 0; 1181 return 0;
@@ -1193,8 +1184,8 @@ static inline int match_bss_static_wep(struct lbs_802_11_security *secinfo,
1193static inline int match_bss_wpa(struct lbs_802_11_security *secinfo, 1184static inline int match_bss_wpa(struct lbs_802_11_security *secinfo,
1194 struct bss_descriptor *match_bss) 1185 struct bss_descriptor *match_bss)
1195{ 1186{
1196 if (!secinfo->wep_enabled && secinfo->WPAenabled 1187 if (!secinfo->wep_enabled && secinfo->WPAenabled &&
1197 && (match_bss->wpa_ie[0] == WLAN_EID_GENERIC) 1188 (match_bss->wpa_ie[0] == WLAN_EID_GENERIC)
1198 /* privacy bit may NOT be set in some APs like LinkSys WRT54G 1189 /* privacy bit may NOT be set in some APs like LinkSys WRT54G
1199 && (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */ 1190 && (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */
1200 ) 1191 )
@@ -1219,11 +1210,11 @@ static inline int match_bss_wpa2(struct lbs_802_11_security *secinfo,
1219static inline int match_bss_dynamic_wep(struct lbs_802_11_security *secinfo, 1210static inline int match_bss_dynamic_wep(struct lbs_802_11_security *secinfo,
1220 struct bss_descriptor *match_bss) 1211 struct bss_descriptor *match_bss)
1221{ 1212{
1222 if (!secinfo->wep_enabled && !secinfo->WPAenabled 1213 if (!secinfo->wep_enabled &&
1223 && !secinfo->WPA2enabled 1214 !secinfo->WPAenabled && !secinfo->WPA2enabled &&
1224 && (match_bss->wpa_ie[0] != WLAN_EID_GENERIC) 1215 (match_bss->wpa_ie[0] != WLAN_EID_GENERIC) &&
1225 && (match_bss->rsn_ie[0] != WLAN_EID_RSN) 1216 (match_bss->rsn_ie[0] != WLAN_EID_RSN) &&
1226 && (match_bss->capability & WLAN_CAPABILITY_PRIVACY)) 1217 (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
1227 return 1; 1218 return 1;
1228 else 1219 else
1229 return 0; 1220 return 0;
@@ -1534,8 +1525,8 @@ static int assoc_helper_associate(struct lbs_private *priv,
1534 /* If we're given and 'any' BSSID, try associating based on SSID */ 1525 /* If we're given and 'any' BSSID, try associating based on SSID */
1535 1526
1536 if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) { 1527 if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
1537 if (compare_ether_addr(bssid_any, assoc_req->bssid) 1528 if (compare_ether_addr(bssid_any, assoc_req->bssid) &&
1538 && compare_ether_addr(bssid_off, assoc_req->bssid)) { 1529 compare_ether_addr(bssid_off, assoc_req->bssid)) {
1539 ret = assoc_helper_bssid(priv, assoc_req); 1530 ret = assoc_helper_bssid(priv, assoc_req);
1540 done = 1; 1531 done = 1;
1541 } 1532 }
@@ -1621,11 +1612,9 @@ static int assoc_helper_channel(struct lbs_private *priv,
1621 goto restore_mesh; 1612 goto restore_mesh;
1622 } 1613 }
1623 1614
1624 if ( assoc_req->secinfo.wep_enabled 1615 if (assoc_req->secinfo.wep_enabled &&
1625 && (assoc_req->wep_keys[0].len 1616 (assoc_req->wep_keys[0].len || assoc_req->wep_keys[1].len ||
1626 || assoc_req->wep_keys[1].len 1617 assoc_req->wep_keys[2].len || assoc_req->wep_keys[3].len)) {
1627 || assoc_req->wep_keys[2].len
1628 || assoc_req->wep_keys[3].len)) {
1629 /* Make sure WEP keys are re-sent to firmware */ 1618 /* Make sure WEP keys are re-sent to firmware */
1630 set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags); 1619 set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags);
1631 } 1620 }
@@ -1992,14 +1981,14 @@ void lbs_association_worker(struct work_struct *work)
1992 assoc_req->secinfo.auth_mode); 1981 assoc_req->secinfo.auth_mode);
1993 1982
1994 /* If 'any' SSID was specified, find an SSID to associate with */ 1983 /* If 'any' SSID was specified, find an SSID to associate with */
1995 if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags) 1984 if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags) &&
1996 && !assoc_req->ssid_len) 1985 !assoc_req->ssid_len)
1997 find_any_ssid = 1; 1986 find_any_ssid = 1;
1998 1987
1999 /* But don't use 'any' SSID if there's a valid locked BSSID to use */ 1988 /* But don't use 'any' SSID if there's a valid locked BSSID to use */
2000 if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) { 1989 if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
2001 if (compare_ether_addr(assoc_req->bssid, bssid_any) 1990 if (compare_ether_addr(assoc_req->bssid, bssid_any) &&
2002 && compare_ether_addr(assoc_req->bssid, bssid_off)) 1991 compare_ether_addr(assoc_req->bssid, bssid_off))
2003 find_any_ssid = 0; 1992 find_any_ssid = 0;
2004 } 1993 }
2005 1994
@@ -2061,13 +2050,6 @@ void lbs_association_worker(struct work_struct *work)
2061 goto out; 2050 goto out;
2062 } 2051 }
2063 2052
2064 if ( test_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags)
2065 || test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags)) {
2066 ret = assoc_helper_wep_keys(priv, assoc_req);
2067 if (ret)
2068 goto out;
2069 }
2070
2071 if (test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) { 2053 if (test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) {
2072 ret = assoc_helper_secinfo(priv, assoc_req); 2054 ret = assoc_helper_secinfo(priv, assoc_req);
2073 if (ret) 2055 if (ret)
@@ -2080,18 +2062,31 @@ void lbs_association_worker(struct work_struct *work)
2080 goto out; 2062 goto out;
2081 } 2063 }
2082 2064
2083 if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags) 2065 /*
2084 || test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) { 2066 * v10 FW wants WPA keys to be set/cleared before WEP key operations,
2067 * otherwise it will fail to correctly associate to WEP networks.
2068 * Other firmware versions don't appear to care.
2069 */
2070 if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags) ||
2071 test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) {
2085 ret = assoc_helper_wpa_keys(priv, assoc_req); 2072 ret = assoc_helper_wpa_keys(priv, assoc_req);
2086 if (ret) 2073 if (ret)
2087 goto out; 2074 goto out;
2088 } 2075 }
2089 2076
2077 if (test_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags) ||
2078 test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags)) {
2079 ret = assoc_helper_wep_keys(priv, assoc_req);
2080 if (ret)
2081 goto out;
2082 }
2083
2084
2090 /* SSID/BSSID should be the _last_ config option set, because they 2085 /* SSID/BSSID should be the _last_ config option set, because they
2091 * trigger the association attempt. 2086 * trigger the association attempt.
2092 */ 2087 */
2093 if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags) 2088 if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags) ||
2094 || test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) { 2089 test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
2095 int success = 1; 2090 int success = 1;
2096 2091
2097 ret = assoc_helper_associate(priv, assoc_req); 2092 ret = assoc_helper_associate(priv, assoc_req);
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 42611bea76a3..82371ef39524 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -143,19 +143,6 @@ int lbs_update_hw_spec(struct lbs_private *priv)
143 lbs_deb_cmd("GET_HW_SPEC: hardware interface 0x%x, hardware spec 0x%04x\n", 143 lbs_deb_cmd("GET_HW_SPEC: hardware interface 0x%x, hardware spec 0x%04x\n",
144 cmd.hwifversion, cmd.version); 144 cmd.hwifversion, cmd.version);
145 145
146 /* Determine mesh_fw_ver from fwrelease and fwcapinfo */
147 /* 5.0.16p0 9.0.0.p0 is known to NOT support any mesh */
148 /* 5.110.22 have mesh command with 0xa3 command id */
149 /* 10.0.0.p0 FW brings in mesh config command with different id */
150 /* Check FW version MSB and initialize mesh_fw_ver */
151 if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5)
152 priv->mesh_fw_ver = MESH_FW_OLD;
153 else if ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
154 (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK))
155 priv->mesh_fw_ver = MESH_FW_NEW;
156 else
157 priv->mesh_fw_ver = MESH_NONE;
158
159 /* Clamp region code to 8-bit since FW spec indicates that it should 146 /* Clamp region code to 8-bit since FW spec indicates that it should
160 * only ever be 8-bit, even though the field size is 16-bit. Some firmware 147 * only ever be 8-bit, even though the field size is 16-bit. Some firmware
161 * returns non-zero high 8 bits here. 148 * returns non-zero high 8 bits here.
@@ -855,9 +842,6 @@ int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on)
855 if (priv->fwrelease < 0x09000000) { 842 if (priv->fwrelease < 0x09000000) {
856 switch (preamble) { 843 switch (preamble) {
857 case RADIO_PREAMBLE_SHORT: 844 case RADIO_PREAMBLE_SHORT:
858 if (!(priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
859 goto out;
860 /* Fall through */
861 case RADIO_PREAMBLE_AUTO: 845 case RADIO_PREAMBLE_AUTO:
862 case RADIO_PREAMBLE_LONG: 846 case RADIO_PREAMBLE_LONG:
863 cmd.control = cpu_to_le16(preamble); 847 cmd.control = cpu_to_le16(preamble);
@@ -1011,6 +995,8 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1011 ret = 0; 995 ret = 0;
1012 break; 996 break;
1013 997
998#ifdef CONFIG_LIBERTAS_MESH
999
1014 case CMD_BT_ACCESS: 1000 case CMD_BT_ACCESS:
1015 ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf); 1001 ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf);
1016 break; 1002 break;
@@ -1019,6 +1005,8 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1019 ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf); 1005 ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf);
1020 break; 1006 break;
1021 1007
1008#endif
1009
1022 case CMD_802_11_BEACON_CTRL: 1010 case CMD_802_11_BEACON_CTRL:
1023 ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action); 1011 ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action);
1024 break; 1012 break;
@@ -1317,7 +1305,7 @@ int lbs_execute_next_command(struct lbs_private *priv)
1317 if ((priv->psmode != LBS802_11POWERMODECAM) && 1305 if ((priv->psmode != LBS802_11POWERMODECAM) &&
1318 (priv->psstate == PS_STATE_FULL_POWER) && 1306 (priv->psstate == PS_STATE_FULL_POWER) &&
1319 ((priv->connect_status == LBS_CONNECTED) || 1307 ((priv->connect_status == LBS_CONNECTED) ||
1320 (priv->mesh_connect_status == LBS_CONNECTED))) { 1308 lbs_mesh_connected(priv))) {
1321 if (priv->secinfo.WPAenabled || 1309 if (priv->secinfo.WPAenabled ||
1322 priv->secinfo.WPA2enabled) { 1310 priv->secinfo.WPA2enabled) {
1323 /* check for valid WPA group keys */ 1311 /* check for valid WPA group keys */
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index 2862748aef70..cb4138a55fdf 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -110,18 +110,6 @@ int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val);
110int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val); 110int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val);
111 111
112 112
113/* Mesh related */
114
115int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
116 struct cmd_ds_mesh_access *cmd);
117
118int lbs_mesh_config_send(struct lbs_private *priv,
119 struct cmd_ds_mesh_config *cmd,
120 uint16_t action, uint16_t type);
121
122int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
123
124
125/* Commands only used in wext.c, assoc. and scan.c */ 113/* Commands only used in wext.c, assoc. and scan.c */
126 114
127int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0, 115int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 21d57690c20a..e7470442f76b 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -240,11 +240,6 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len)
240 /* Now we got response from FW, cancel the command timer */ 240 /* Now we got response from FW, cancel the command timer */
241 del_timer(&priv->command_timer); 241 del_timer(&priv->command_timer);
242 priv->cmd_timed_out = 0; 242 priv->cmd_timed_out = 0;
243 if (priv->nr_retries) {
244 lbs_pr_info("Received result %x to command %x after %d retries\n",
245 result, curcmd, priv->nr_retries);
246 priv->nr_retries = 0;
247 }
248 243
249 /* Store the response code to cur_cmd_retcode. */ 244 /* Store the response code to cur_cmd_retcode. */
250 priv->cur_cmd_retcode = result; 245 priv->cur_cmd_retcode = result;
@@ -485,20 +480,8 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
485 break; 480 break;
486 481
487 case MACREG_INT_CODE_MESH_AUTO_STARTED: 482 case MACREG_INT_CODE_MESH_AUTO_STARTED:
488 /* Ignore spurious autostart events if autostart is disabled */ 483 /* Ignore spurious autostart events */
489 if (!priv->mesh_autostart_enabled) { 484 lbs_pr_info("EVENT: MESH_AUTO_STARTED (ignoring)\n");
490 lbs_pr_info("EVENT: MESH_AUTO_STARTED (ignoring)\n");
491 break;
492 }
493 lbs_pr_info("EVENT: MESH_AUTO_STARTED\n");
494 priv->mesh_connect_status = LBS_CONNECTED;
495 if (priv->mesh_open) {
496 netif_carrier_on(priv->mesh_dev);
497 if (!priv->tx_pending_len)
498 netif_wake_queue(priv->mesh_dev);
499 }
500 priv->mode = IW_MODE_ADHOC;
501 schedule_work(&priv->sync_channel);
502 break; 485 break;
503 486
504 default: 487 default:
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index 6b6ea9f7bf5b..ea3f10ef4e00 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -397,13 +397,6 @@ enum KEY_INFO_WPA {
397 KEY_INFO_WPA_ENABLED = 0x04 397 KEY_INFO_WPA_ENABLED = 0x04
398}; 398};
399 399
400/** mesh_fw_ver */
401enum _mesh_fw_ver {
402 MESH_NONE = 0, /* MESH is not supported */
403 MESH_FW_OLD, /* MESH is supported in FW V5 */
404 MESH_FW_NEW, /* MESH is supported in FW V10 and newer */
405};
406
407/* Default values for fwt commands. */ 400/* Default values for fwt commands. */
408#define FWT_DEFAULT_METRIC 0 401#define FWT_DEFAULT_METRIC 0
409#define FWT_DEFAULT_DIR 1 402#define FWT_DEFAULT_DIR 1
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 05bb298dfae9..6977ee820214 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -39,15 +39,14 @@ struct lbs_private {
39 39
40 /* Mesh */ 40 /* Mesh */
41 struct net_device *mesh_dev; /* Virtual device */ 41 struct net_device *mesh_dev; /* Virtual device */
42#ifdef CONFIG_LIBERTAS_MESH
42 u32 mesh_connect_status; 43 u32 mesh_connect_status;
43 struct lbs_mesh_stats mstats; 44 struct lbs_mesh_stats mstats;
44 int mesh_open; 45 int mesh_open;
45 int mesh_fw_ver;
46 int mesh_autostart_enabled;
47 uint16_t mesh_tlv; 46 uint16_t mesh_tlv;
48 u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1]; 47 u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1];
49 u8 mesh_ssid_len; 48 u8 mesh_ssid_len;
50 struct work_struct sync_channel; 49#endif
51 50
52 /* Monitor mode */ 51 /* Monitor mode */
53 struct net_device *rtap_net_dev; 52 struct net_device *rtap_net_dev;
@@ -110,7 +109,6 @@ struct lbs_private {
110 struct list_head cmdpendingq; /* pending command buffers */ 109 struct list_head cmdpendingq; /* pending command buffers */
111 wait_queue_head_t cmd_pending; 110 wait_queue_head_t cmd_pending;
112 struct timer_list command_timer; 111 struct timer_list command_timer;
113 int nr_retries;
114 int cmd_timed_out; 112 int cmd_timed_out;
115 113
116 /* Command responses sent from the hardware to the driver */ 114 /* Command responses sent from the hardware to the driver */
@@ -176,9 +174,7 @@ struct lbs_private {
176 struct bss_descriptor *networks; 174 struct bss_descriptor *networks;
177 struct assoc_request * pending_assoc_req; 175 struct assoc_request * pending_assoc_req;
178 struct assoc_request * in_progress_assoc_req; 176 struct assoc_request * in_progress_assoc_req;
179 u16 capability;
180 uint16_t enablehwauto; 177 uint16_t enablehwauto;
181 uint16_t ratebitmap;
182 178
183 /* ADHOC */ 179 /* ADHOC */
184 u16 beacon_period; 180 u16 beacon_period;
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 63d020374c2b..3804a58d7f4e 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -114,9 +114,11 @@ const struct ethtool_ops lbs_ethtool_ops = {
114 .get_drvinfo = lbs_ethtool_get_drvinfo, 114 .get_drvinfo = lbs_ethtool_get_drvinfo,
115 .get_eeprom = lbs_ethtool_get_eeprom, 115 .get_eeprom = lbs_ethtool_get_eeprom,
116 .get_eeprom_len = lbs_ethtool_get_eeprom_len, 116 .get_eeprom_len = lbs_ethtool_get_eeprom_len,
117#ifdef CONFIG_LIBERTAS_MESH
117 .get_sset_count = lbs_mesh_ethtool_get_sset_count, 118 .get_sset_count = lbs_mesh_ethtool_get_sset_count,
118 .get_ethtool_stats = lbs_mesh_ethtool_get_stats, 119 .get_ethtool_stats = lbs_mesh_ethtool_get_stats,
119 .get_strings = lbs_mesh_ethtool_get_strings, 120 .get_strings = lbs_mesh_ethtool_get_strings,
121#endif
120 .get_wol = lbs_ethtool_get_wol, 122 .get_wol = lbs_ethtool_get_wol,
121 .set_wol = lbs_ethtool_set_wol, 123 .set_wol = lbs_ethtool_set_wol,
122}; 124};
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index bf4bfbae6227..3ea03f259ee7 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -23,6 +23,7 @@
23#include <linux/kthread.h> 23#include <linux/kthread.h>
24#include <linux/list.h> 24#include <linux/list.h>
25#include <linux/netdevice.h> 25#include <linux/netdevice.h>
26#include <linux/semaphore.h>
26#include <linux/spi/libertas_spi.h> 27#include <linux/spi/libertas_spi.h>
27#include <linux/spi/spi.h> 28#include <linux/spi/spi.h>
28 29
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index c2975c8e2f21..28a1c9d1627a 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -123,7 +123,7 @@ static ssize_t lbs_rtap_set(struct device *dev,
123 if (priv->monitormode == monitor_mode) 123 if (priv->monitormode == monitor_mode)
124 return strlen(buf); 124 return strlen(buf);
125 if (!priv->monitormode) { 125 if (!priv->monitormode) {
126 if (priv->infra_open || priv->mesh_open) 126 if (priv->infra_open || lbs_mesh_open(priv))
127 return -EBUSY; 127 return -EBUSY;
128 if (priv->mode == IW_MODE_INFRA) 128 if (priv->mode == IW_MODE_INFRA)
129 lbs_cmd_80211_deauthenticate(priv, 129 lbs_cmd_80211_deauthenticate(priv,
@@ -319,15 +319,18 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd,
319{ 319{
320 int i = nr_addrs; 320 int i = nr_addrs;
321 struct dev_mc_list *mc_list; 321 struct dev_mc_list *mc_list;
322 int cnt;
322 323
323 if ((dev->flags & (IFF_UP|IFF_MULTICAST)) != (IFF_UP|IFF_MULTICAST)) 324 if ((dev->flags & (IFF_UP|IFF_MULTICAST)) != (IFF_UP|IFF_MULTICAST))
324 return nr_addrs; 325 return nr_addrs;
325 326
326 netif_addr_lock_bh(dev); 327 netif_addr_lock_bh(dev);
327 for (mc_list = dev->mc_list; mc_list; mc_list = mc_list->next) { 328 cnt = netdev_mc_count(dev);
329 netdev_for_each_mc_addr(mc_list, dev) {
328 if (mac_in_list(cmd->maclist, nr_addrs, mc_list->dmi_addr)) { 330 if (mac_in_list(cmd->maclist, nr_addrs, mc_list->dmi_addr)) {
329 lbs_deb_net("mcast address %s:%pM skipped\n", dev->name, 331 lbs_deb_net("mcast address %s:%pM skipped\n", dev->name,
330 mc_list->dmi_addr); 332 mc_list->dmi_addr);
333 cnt--;
331 continue; 334 continue;
332 } 335 }
333 336
@@ -337,9 +340,10 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd,
337 lbs_deb_net("mcast address %s:%pM added to filter\n", dev->name, 340 lbs_deb_net("mcast address %s:%pM added to filter\n", dev->name,
338 mc_list->dmi_addr); 341 mc_list->dmi_addr);
339 i++; 342 i++;
343 cnt--;
340 } 344 }
341 netif_addr_unlock_bh(dev); 345 netif_addr_unlock_bh(dev);
342 if (mc_list) 346 if (cnt)
343 return -EOVERFLOW; 347 return -EOVERFLOW;
344 348
345 return i; 349 return i;
@@ -536,31 +540,14 @@ static int lbs_thread(void *data)
536 if (priv->cmd_timed_out && priv->cur_cmd) { 540 if (priv->cmd_timed_out && priv->cur_cmd) {
537 struct cmd_ctrl_node *cmdnode = priv->cur_cmd; 541 struct cmd_ctrl_node *cmdnode = priv->cur_cmd;
538 542
539 if (++priv->nr_retries > 3) { 543 lbs_pr_info("Timeout submitting command 0x%04x\n",
540 lbs_pr_info("Excessive timeouts submitting " 544 le16_to_cpu(cmdnode->cmdbuf->command));
541 "command 0x%04x\n", 545 lbs_complete_command(priv, cmdnode, -ETIMEDOUT);
542 le16_to_cpu(cmdnode->cmdbuf->command)); 546 if (priv->reset_card)
543 lbs_complete_command(priv, cmdnode, -ETIMEDOUT); 547 priv->reset_card(priv);
544 priv->nr_retries = 0;
545 if (priv->reset_card)
546 priv->reset_card(priv);
547 } else {
548 priv->cur_cmd = NULL;
549 priv->dnld_sent = DNLD_RES_RECEIVED;
550 lbs_pr_info("requeueing command 0x%04x due "
551 "to timeout (#%d)\n",
552 le16_to_cpu(cmdnode->cmdbuf->command),
553 priv->nr_retries);
554
555 /* Stick it back at the _top_ of the pending queue
556 for immediate resubmission */
557 list_add(&cmdnode->list, &priv->cmdpendingq);
558 }
559 } 548 }
560 priv->cmd_timed_out = 0; 549 priv->cmd_timed_out = 0;
561 550
562
563
564 if (!priv->fw_ready) 551 if (!priv->fw_ready)
565 continue; 552 continue;
566 553
@@ -622,7 +609,7 @@ static int lbs_thread(void *data)
622 if (priv->connect_status == LBS_CONNECTED) 609 if (priv->connect_status == LBS_CONNECTED)
623 netif_wake_queue(priv->dev); 610 netif_wake_queue(priv->dev);
624 if (priv->mesh_dev && 611 if (priv->mesh_dev &&
625 priv->mesh_connect_status == LBS_CONNECTED) 612 lbs_mesh_connected(priv))
626 netif_wake_queue(priv->mesh_dev); 613 netif_wake_queue(priv->mesh_dev);
627 } 614 }
628 } 615 }
@@ -732,7 +719,7 @@ done:
732 * This function handles the timeout of command sending. 719 * This function handles the timeout of command sending.
733 * It will re-send the same command again. 720 * It will re-send the same command again.
734 */ 721 */
735static void command_timer_fn(unsigned long data) 722static void lbs_cmd_timeout_handler(unsigned long data)
736{ 723{
737 struct lbs_private *priv = (struct lbs_private *)data; 724 struct lbs_private *priv = (struct lbs_private *)data;
738 unsigned long flags; 725 unsigned long flags;
@@ -809,18 +796,6 @@ int lbs_exit_auto_deep_sleep(struct lbs_private *priv)
809 return 0; 796 return 0;
810} 797}
811 798
812static void lbs_sync_channel_worker(struct work_struct *work)
813{
814 struct lbs_private *priv = container_of(work, struct lbs_private,
815 sync_channel);
816
817 lbs_deb_enter(LBS_DEB_MAIN);
818 if (lbs_update_channel(priv))
819 lbs_pr_info("Channel synchronization failed.");
820 lbs_deb_leave(LBS_DEB_MAIN);
821}
822
823
824static int lbs_init_adapter(struct lbs_private *priv) 799static int lbs_init_adapter(struct lbs_private *priv)
825{ 800{
826 size_t bufsize; 801 size_t bufsize;
@@ -848,14 +823,12 @@ static int lbs_init_adapter(struct lbs_private *priv)
848 memset(priv->current_addr, 0xff, ETH_ALEN); 823 memset(priv->current_addr, 0xff, ETH_ALEN);
849 824
850 priv->connect_status = LBS_DISCONNECTED; 825 priv->connect_status = LBS_DISCONNECTED;
851 priv->mesh_connect_status = LBS_DISCONNECTED;
852 priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; 826 priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
853 priv->mode = IW_MODE_INFRA; 827 priv->mode = IW_MODE_INFRA;
854 priv->channel = DEFAULT_AD_HOC_CHANNEL; 828 priv->channel = DEFAULT_AD_HOC_CHANNEL;
855 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON; 829 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
856 priv->radio_on = 1; 830 priv->radio_on = 1;
857 priv->enablehwauto = 1; 831 priv->enablehwauto = 1;
858 priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
859 priv->psmode = LBS802_11POWERMODECAM; 832 priv->psmode = LBS802_11POWERMODECAM;
860 priv->psstate = PS_STATE_FULL_POWER; 833 priv->psstate = PS_STATE_FULL_POWER;
861 priv->is_deep_sleep = 0; 834 priv->is_deep_sleep = 0;
@@ -865,7 +838,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
865 838
866 mutex_init(&priv->lock); 839 mutex_init(&priv->lock);
867 840
868 setup_timer(&priv->command_timer, command_timer_fn, 841 setup_timer(&priv->command_timer, lbs_cmd_timeout_handler,
869 (unsigned long)priv); 842 (unsigned long)priv);
870 setup_timer(&priv->auto_deepsleep_timer, auto_deepsleep_timer_fn, 843 setup_timer(&priv->auto_deepsleep_timer, auto_deepsleep_timer_fn,
871 (unsigned long)priv); 844 (unsigned long)priv);
@@ -998,11 +971,6 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
998 INIT_DELAYED_WORK(&priv->assoc_work, lbs_association_worker); 971 INIT_DELAYED_WORK(&priv->assoc_work, lbs_association_worker);
999 INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker); 972 INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker);
1000 INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker); 973 INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker);
1001 INIT_WORK(&priv->sync_channel, lbs_sync_channel_worker);
1002
1003 priv->mesh_open = 0;
1004 sprintf(priv->mesh_ssid, "mesh");
1005 priv->mesh_ssid_len = 4;
1006 974
1007 priv->wol_criteria = 0xffffffff; 975 priv->wol_criteria = 0xffffffff;
1008 priv->wol_gpio = 0xff; 976 priv->wol_gpio = 0xff;
@@ -1076,6 +1044,17 @@ void lbs_remove_card(struct lbs_private *priv)
1076EXPORT_SYMBOL_GPL(lbs_remove_card); 1044EXPORT_SYMBOL_GPL(lbs_remove_card);
1077 1045
1078 1046
1047static int lbs_rtap_supported(struct lbs_private *priv)
1048{
1049 if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5)
1050 return 1;
1051
1052 /* newer firmware use a capability mask */
1053 return ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
1054 (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK));
1055}
1056
1057
1079int lbs_start_card(struct lbs_private *priv) 1058int lbs_start_card(struct lbs_private *priv)
1080{ 1059{
1081 struct net_device *dev = priv->dev; 1060 struct net_device *dev = priv->dev;
@@ -1095,12 +1074,14 @@ int lbs_start_card(struct lbs_private *priv)
1095 1074
1096 lbs_update_channel(priv); 1075 lbs_update_channel(priv);
1097 1076
1077 lbs_init_mesh(priv);
1078
1098 /* 1079 /*
1099 * While rtap isn't related to mesh, only mesh-enabled 1080 * While rtap isn't related to mesh, only mesh-enabled
1100 * firmware implements the rtap functionality via 1081 * firmware implements the rtap functionality via
1101 * CMD_802_11_MONITOR_MODE. 1082 * CMD_802_11_MONITOR_MODE.
1102 */ 1083 */
1103 if (lbs_init_mesh(priv)) { 1084 if (lbs_rtap_supported(priv)) {
1104 if (device_create_file(&dev->dev, &dev_attr_lbs_rtap)) 1085 if (device_create_file(&dev->dev, &dev_attr_lbs_rtap))
1105 lbs_pr_err("cannot register lbs_rtap attribute\n"); 1086 lbs_pr_err("cannot register lbs_rtap attribute\n");
1106 } 1087 }
@@ -1134,7 +1115,9 @@ void lbs_stop_card(struct lbs_private *priv)
1134 netif_carrier_off(dev); 1115 netif_carrier_off(dev);
1135 1116
1136 lbs_debugfs_remove_one(priv); 1117 lbs_debugfs_remove_one(priv);
1137 if (lbs_deinit_mesh(priv)) 1118 lbs_deinit_mesh(priv);
1119
1120 if (lbs_rtap_supported(priv))
1138 device_remove_file(&dev->dev, &dev_attr_lbs_rtap); 1121 device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
1139 1122
1140 /* Delete the timeout of the currently processing command */ 1123 /* Delete the timeout of the currently processing command */
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 92b7a357a5e4..e385af1f4583 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -1,4 +1,3 @@
1#include <linux/moduleparam.h>
2#include <linux/delay.h> 1#include <linux/delay.h>
3#include <linux/etherdevice.h> 2#include <linux/etherdevice.h>
4#include <linux/netdevice.h> 3#include <linux/netdevice.h>
@@ -197,7 +196,14 @@ int lbs_init_mesh(struct lbs_private *priv)
197 196
198 lbs_deb_enter(LBS_DEB_MESH); 197 lbs_deb_enter(LBS_DEB_MESH);
199 198
200 if (priv->mesh_fw_ver == MESH_FW_OLD) { 199 priv->mesh_connect_status = LBS_DISCONNECTED;
200
201 /* Determine mesh_fw_ver from fwrelease and fwcapinfo */
202 /* 5.0.16p0 9.0.0.p0 is known to NOT support any mesh */
203 /* 5.110.22 have mesh command with 0xa3 command id */
204 /* 10.0.0.p0 FW brings in mesh config command with different id */
205 /* Check FW version MSB and initialize mesh_fw_ver */
206 if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5) {
201 /* Enable mesh, if supported, and work out which TLV it uses. 207 /* Enable mesh, if supported, and work out which TLV it uses.
202 0x100 + 291 is an unofficial value used in 5.110.20.pXX 208 0x100 + 291 is an unofficial value used in 5.110.20.pXX
203 0x100 + 37 is the official value used in 5.110.21.pXX 209 0x100 + 37 is the official value used in 5.110.21.pXX
@@ -219,7 +225,9 @@ int lbs_init_mesh(struct lbs_private *priv)
219 priv->channel)) 225 priv->channel))
220 priv->mesh_tlv = 0; 226 priv->mesh_tlv = 0;
221 } 227 }
222 } else if (priv->mesh_fw_ver == MESH_FW_NEW) { 228 } else
229 if ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
230 (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK)) {
223 /* 10.0.0.pXX new firmwares should succeed with TLV 231 /* 10.0.0.pXX new firmwares should succeed with TLV
224 * 0x100+37; Do not invoke command with old TLV. 232 * 0x100+37; Do not invoke command with old TLV.
225 */ 233 */
@@ -228,7 +236,12 @@ int lbs_init_mesh(struct lbs_private *priv)
228 priv->channel)) 236 priv->channel))
229 priv->mesh_tlv = 0; 237 priv->mesh_tlv = 0;
230 } 238 }
239
240
231 if (priv->mesh_tlv) { 241 if (priv->mesh_tlv) {
242 sprintf(priv->mesh_ssid, "mesh");
243 priv->mesh_ssid_len = 4;
244
232 lbs_add_mesh(priv); 245 lbs_add_mesh(priv);
233 246
234 if (device_create_file(&dev->dev, &dev_attr_lbs_mesh)) 247 if (device_create_file(&dev->dev, &dev_attr_lbs_mesh))
@@ -416,10 +429,10 @@ struct net_device *lbs_mesh_set_dev(struct lbs_private *priv,
416 struct net_device *dev, struct rxpd *rxpd) 429 struct net_device *dev, struct rxpd *rxpd)
417{ 430{
418 if (priv->mesh_dev) { 431 if (priv->mesh_dev) {
419 if (priv->mesh_fw_ver == MESH_FW_OLD) { 432 if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID) {
420 if (rxpd->rx_control & RxPD_MESH_FRAME) 433 if (rxpd->rx_control & RxPD_MESH_FRAME)
421 dev = priv->mesh_dev; 434 dev = priv->mesh_dev;
422 } else if (priv->mesh_fw_ver == MESH_FW_NEW) { 435 } else if (priv->mesh_tlv == TLV_TYPE_MESH_ID) {
423 if (rxpd->u.bss.bss_num == MESH_IFACE_ID) 436 if (rxpd->u.bss.bss_num == MESH_IFACE_ID)
424 dev = priv->mesh_dev; 437 dev = priv->mesh_dev;
425 } 438 }
@@ -432,9 +445,9 @@ void lbs_mesh_set_txpd(struct lbs_private *priv,
432 struct net_device *dev, struct txpd *txpd) 445 struct net_device *dev, struct txpd *txpd)
433{ 446{
434 if (dev == priv->mesh_dev) { 447 if (dev == priv->mesh_dev) {
435 if (priv->mesh_fw_ver == MESH_FW_OLD) 448 if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID)
436 txpd->tx_control |= cpu_to_le32(TxPD_MESH_FRAME); 449 txpd->tx_control |= cpu_to_le32(TxPD_MESH_FRAME);
437 else if (priv->mesh_fw_ver == MESH_FW_NEW) 450 else if (priv->mesh_tlv == TLV_TYPE_MESH_ID)
438 txpd->u.bss.bss_num = MESH_IFACE_ID; 451 txpd->u.bss.bss_num = MESH_IFACE_ID;
439 } 452 }
440} 453}
@@ -538,7 +551,7 @@ static int __lbs_mesh_config_send(struct lbs_private *priv,
538 * Command id is 0xac for v10 FW along with mesh interface 551 * Command id is 0xac for v10 FW along with mesh interface
539 * id in bits 14-13-12. 552 * id in bits 14-13-12.
540 */ 553 */
541 if (priv->mesh_fw_ver == MESH_FW_NEW) 554 if (priv->mesh_tlv == TLV_TYPE_MESH_ID)
542 command = CMD_MESH_CONFIG | 555 command = CMD_MESH_CONFIG |
543 (MESH_IFACE_ID << MESH_IFACE_BIT_OFFSET); 556 (MESH_IFACE_ID << MESH_IFACE_BIT_OFFSET);
544 557
diff --git a/drivers/net/wireless/libertas/mesh.h b/drivers/net/wireless/libertas/mesh.h
index fea9b5d005fc..e2573303a328 100644
--- a/drivers/net/wireless/libertas/mesh.h
+++ b/drivers/net/wireless/libertas/mesh.h
@@ -9,6 +9,8 @@
9#include <net/lib80211.h> 9#include <net/lib80211.h>
10 10
11 11
12#ifdef CONFIG_LIBERTAS_MESH
13
12/* Mesh statistics */ 14/* Mesh statistics */
13struct lbs_mesh_stats { 15struct lbs_mesh_stats {
14 u32 fwd_bcast_cnt; /* Fwd: Broadcast counter */ 16 u32 fwd_bcast_cnt; /* Fwd: Broadcast counter */
@@ -46,11 +48,20 @@ void lbs_mesh_set_txpd(struct lbs_private *priv,
46/* Command handling */ 48/* Command handling */
47 49
48struct cmd_ds_command; 50struct cmd_ds_command;
51struct cmd_ds_mesh_access;
52struct cmd_ds_mesh_config;
49 53
50int lbs_cmd_bt_access(struct cmd_ds_command *cmd, 54int lbs_cmd_bt_access(struct cmd_ds_command *cmd,
51 u16 cmd_action, void *pdata_buf); 55 u16 cmd_action, void *pdata_buf);
52int lbs_cmd_fwt_access(struct cmd_ds_command *cmd, 56int lbs_cmd_fwt_access(struct cmd_ds_command *cmd,
53 u16 cmd_action, void *pdata_buf); 57 u16 cmd_action, void *pdata_buf);
58int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
59 struct cmd_ds_mesh_access *cmd);
60int lbs_mesh_config_send(struct lbs_private *priv,
61 struct cmd_ds_mesh_config *cmd,
62 uint16_t action, uint16_t type);
63int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
64
54 65
55 66
56/* Persistent configuration */ 67/* Persistent configuration */
@@ -75,4 +86,25 @@ void lbs_mesh_ethtool_get_strings(struct net_device *dev,
75 uint32_t stringset, uint8_t *s); 86 uint32_t stringset, uint8_t *s);
76 87
77 88
89/* Accessors */
90
91#define lbs_mesh_open(priv) (priv->mesh_open)
92#define lbs_mesh_connected(priv) (priv->mesh_connect_status == LBS_CONNECTED)
93
94#else
95
96#define lbs_init_mesh(priv)
97#define lbs_deinit_mesh(priv)
98#define lbs_add_mesh(priv)
99#define lbs_remove_mesh(priv)
100#define lbs_mesh_set_dev(priv, dev, rxpd) (dev)
101#define lbs_mesh_set_txpd(priv, dev, txpd)
102#define lbs_mesh_config(priv, enable, chan)
103#define lbs_mesh_open(priv) (0)
104#define lbs_mesh_connected(priv) (0)
105
106#endif
107
108
109
78#endif 110#endif
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index b0b1c7841500..220361e69cd3 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -635,7 +635,7 @@ out:
635 if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len) 635 if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len)
636 netif_wake_queue(priv->dev); 636 netif_wake_queue(priv->dev);
637 637
638 if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED) && 638 if (priv->mesh_dev && lbs_mesh_connected(priv) &&
639 !priv->tx_pending_len) 639 !priv->tx_pending_len)
640 netif_wake_queue(priv->mesh_dev); 640 netif_wake_queue(priv->mesh_dev);
641 641
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 315d1ce286ca..52d244ea3d97 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -198,7 +198,7 @@ void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count)
198 if (priv->connect_status == LBS_CONNECTED) 198 if (priv->connect_status == LBS_CONNECTED)
199 netif_wake_queue(priv->dev); 199 netif_wake_queue(priv->dev);
200 200
201 if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED)) 201 if (priv->mesh_dev && lbs_mesh_connected(priv))
202 netif_wake_queue(priv->mesh_dev); 202 netif_wake_queue(priv->mesh_dev);
203} 203}
204EXPORT_SYMBOL_GPL(lbs_send_tx_feedback); 204EXPORT_SYMBOL_GPL(lbs_send_tx_feedback);
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index 4b1aab593a84..71f88a08e090 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -192,7 +192,7 @@ static void copy_active_data_rates(struct lbs_private *priv, u8 *rates)
192 lbs_deb_enter(LBS_DEB_WEXT); 192 lbs_deb_enter(LBS_DEB_WEXT);
193 193
194 if ((priv->connect_status != LBS_CONNECTED) && 194 if ((priv->connect_status != LBS_CONNECTED) &&
195 (priv->mesh_connect_status != LBS_CONNECTED)) 195 !lbs_mesh_connected(priv))
196 memcpy(rates, lbs_bg_rates, MAX_RATES); 196 memcpy(rates, lbs_bg_rates, MAX_RATES);
197 else 197 else
198 memcpy(rates, priv->curbssparams.rates, MAX_RATES); 198 memcpy(rates, priv->curbssparams.rates, MAX_RATES);
@@ -298,6 +298,7 @@ static int lbs_get_nick(struct net_device *dev, struct iw_request_info *info,
298 return 0; 298 return 0;
299} 299}
300 300
301#ifdef CONFIG_LIBERTAS_MESH
301static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info, 302static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
302 struct iw_point *dwrq, char *extra) 303 struct iw_point *dwrq, char *extra)
303{ 304{
@@ -307,7 +308,7 @@ static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
307 308
308 /* Use nickname to indicate that mesh is on */ 309 /* Use nickname to indicate that mesh is on */
309 310
310 if (priv->mesh_connect_status == LBS_CONNECTED) { 311 if (lbs_mesh_connected(priv)) {
311 strncpy(extra, "Mesh", 12); 312 strncpy(extra, "Mesh", 12);
312 extra[12] = '\0'; 313 extra[12] = '\0';
313 dwrq->length = strlen(extra); 314 dwrq->length = strlen(extra);
@@ -321,6 +322,7 @@ static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
321 lbs_deb_leave(LBS_DEB_WEXT); 322 lbs_deb_leave(LBS_DEB_WEXT);
322 return 0; 323 return 0;
323} 324}
325#endif
324 326
325static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info, 327static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info,
326 struct iw_param *vwrq, char *extra) 328 struct iw_param *vwrq, char *extra)
@@ -422,6 +424,7 @@ static int lbs_get_mode(struct net_device *dev,
422 return 0; 424 return 0;
423} 425}
424 426
427#ifdef CONFIG_LIBERTAS_MESH
425static int mesh_wlan_get_mode(struct net_device *dev, 428static int mesh_wlan_get_mode(struct net_device *dev,
426 struct iw_request_info *info, u32 * uwrq, 429 struct iw_request_info *info, u32 * uwrq,
427 char *extra) 430 char *extra)
@@ -433,6 +436,7 @@ static int mesh_wlan_get_mode(struct net_device *dev,
433 lbs_deb_leave(LBS_DEB_WEXT); 436 lbs_deb_leave(LBS_DEB_WEXT);
434 return 0; 437 return 0;
435} 438}
439#endif
436 440
437static int lbs_get_txpow(struct net_device *dev, 441static int lbs_get_txpow(struct net_device *dev,
438 struct iw_request_info *info, 442 struct iw_request_info *info,
@@ -863,7 +867,7 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
863 867
864 /* If we're not associated, all quality values are meaningless */ 868 /* If we're not associated, all quality values are meaningless */
865 if ((priv->connect_status != LBS_CONNECTED) && 869 if ((priv->connect_status != LBS_CONNECTED) &&
866 (priv->mesh_connect_status != LBS_CONNECTED)) 870 !lbs_mesh_connected(priv))
867 goto out; 871 goto out;
868 872
869 /* Quality by RSSI */ 873 /* Quality by RSSI */
@@ -1010,6 +1014,7 @@ out:
1010 return ret; 1014 return ret;
1011} 1015}
1012 1016
1017#ifdef CONFIG_LIBERTAS_MESH
1013static int lbs_mesh_set_freq(struct net_device *dev, 1018static int lbs_mesh_set_freq(struct net_device *dev,
1014 struct iw_request_info *info, 1019 struct iw_request_info *info,
1015 struct iw_freq *fwrq, char *extra) 1020 struct iw_freq *fwrq, char *extra)
@@ -1061,6 +1066,7 @@ out:
1061 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); 1066 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
1062 return ret; 1067 return ret;
1063} 1068}
1069#endif
1064 1070
1065static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info, 1071static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info,
1066 struct iw_param *vwrq, char *extra) 1072 struct iw_param *vwrq, char *extra)
@@ -2108,6 +2114,7 @@ out:
2108 return ret; 2114 return ret;
2109} 2115}
2110 2116
2117#ifdef CONFIG_LIBERTAS_MESH
2111static int lbs_mesh_get_essid(struct net_device *dev, 2118static int lbs_mesh_get_essid(struct net_device *dev,
2112 struct iw_request_info *info, 2119 struct iw_request_info *info,
2113 struct iw_point *dwrq, char *extra) 2120 struct iw_point *dwrq, char *extra)
@@ -2161,6 +2168,7 @@ static int lbs_mesh_set_essid(struct net_device *dev,
2161 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); 2168 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
2162 return ret; 2169 return ret;
2163} 2170}
2171#endif
2164 2172
2165/** 2173/**
2166 * @brief Connect to the AP or Ad-hoc Network with specific bssid 2174 * @brief Connect to the AP or Ad-hoc Network with specific bssid
@@ -2267,7 +2275,13 @@ static const iw_handler lbs_handler[] = {
2267 (iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */ 2275 (iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
2268 (iw_handler) NULL, /* SIOCSIWPMKSA */ 2276 (iw_handler) NULL, /* SIOCSIWPMKSA */
2269}; 2277};
2278struct iw_handler_def lbs_handler_def = {
2279 .num_standard = ARRAY_SIZE(lbs_handler),
2280 .standard = (iw_handler *) lbs_handler,
2281 .get_wireless_stats = lbs_get_wireless_stats,
2282};
2270 2283
2284#ifdef CONFIG_LIBERTAS_MESH
2271static const iw_handler mesh_wlan_handler[] = { 2285static const iw_handler mesh_wlan_handler[] = {
2272 (iw_handler) NULL, /* SIOCSIWCOMMIT */ 2286 (iw_handler) NULL, /* SIOCSIWCOMMIT */
2273 (iw_handler) lbs_get_name, /* SIOCGIWNAME */ 2287 (iw_handler) lbs_get_name, /* SIOCGIWNAME */
@@ -2325,14 +2339,10 @@ static const iw_handler mesh_wlan_handler[] = {
2325 (iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */ 2339 (iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
2326 (iw_handler) NULL, /* SIOCSIWPMKSA */ 2340 (iw_handler) NULL, /* SIOCSIWPMKSA */
2327}; 2341};
2328struct iw_handler_def lbs_handler_def = {
2329 .num_standard = ARRAY_SIZE(lbs_handler),
2330 .standard = (iw_handler *) lbs_handler,
2331 .get_wireless_stats = lbs_get_wireless_stats,
2332};
2333 2342
2334struct iw_handler_def mesh_handler_def = { 2343struct iw_handler_def mesh_handler_def = {
2335 .num_standard = ARRAY_SIZE(mesh_wlan_handler), 2344 .num_standard = ARRAY_SIZE(mesh_wlan_handler),
2336 .standard = (iw_handler *) mesh_wlan_handler, 2345 .standard = (iw_handler *) mesh_wlan_handler,
2337 .get_wireless_stats = lbs_get_wireless_stats, 2346 .get_wireless_stats = lbs_get_wireless_stats,
2338}; 2347};
2348#endif
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 26a1abd5bb03..6ab30033c26c 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -318,14 +318,14 @@ static void lbtf_op_stop(struct ieee80211_hw *hw)
318} 318}
319 319
320static int lbtf_op_add_interface(struct ieee80211_hw *hw, 320static int lbtf_op_add_interface(struct ieee80211_hw *hw,
321 struct ieee80211_if_init_conf *conf) 321 struct ieee80211_vif *vif)
322{ 322{
323 struct lbtf_private *priv = hw->priv; 323 struct lbtf_private *priv = hw->priv;
324 if (priv->vif != NULL) 324 if (priv->vif != NULL)
325 return -EOPNOTSUPP; 325 return -EOPNOTSUPP;
326 326
327 priv->vif = conf->vif; 327 priv->vif = vif;
328 switch (conf->type) { 328 switch (vif->type) {
329 case NL80211_IFTYPE_MESH_POINT: 329 case NL80211_IFTYPE_MESH_POINT:
330 case NL80211_IFTYPE_AP: 330 case NL80211_IFTYPE_AP:
331 lbtf_set_mode(priv, LBTF_AP_MODE); 331 lbtf_set_mode(priv, LBTF_AP_MODE);
@@ -337,12 +337,12 @@ static int lbtf_op_add_interface(struct ieee80211_hw *hw,
337 priv->vif = NULL; 337 priv->vif = NULL;
338 return -EOPNOTSUPP; 338 return -EOPNOTSUPP;
339 } 339 }
340 lbtf_set_mac_address(priv, (u8 *) conf->mac_addr); 340 lbtf_set_mac_address(priv, (u8 *) vif->addr);
341 return 0; 341 return 0;
342} 342}
343 343
344static void lbtf_op_remove_interface(struct ieee80211_hw *hw, 344static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
345 struct ieee80211_if_init_conf *conf) 345 struct ieee80211_vif *vif)
346{ 346{
347 struct lbtf_private *priv = hw->priv; 347 struct lbtf_private *priv = hw->priv;
348 348
@@ -555,6 +555,9 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
555 priv->band.n_channels = ARRAY_SIZE(lbtf_channels); 555 priv->band.n_channels = ARRAY_SIZE(lbtf_channels);
556 priv->band.channels = priv->channels; 556 priv->band.channels = priv->channels;
557 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; 557 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
558 hw->wiphy->interface_modes =
559 BIT(NL80211_IFTYPE_STATION) |
560 BIT(NL80211_IFTYPE_ADHOC);
558 skb_queue_head_init(&priv->bc_ps_buf); 561 skb_queue_head_init(&priv->bc_ps_buf);
559 562
560 SET_IEEE80211_DEV(hw, dmdev); 563 SET_IEEE80211_DEV(hw, dmdev);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 88e41176e7fd..6ea77e95277b 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -32,6 +32,10 @@ static int radios = 2;
32module_param(radios, int, 0444); 32module_param(radios, int, 0444);
33MODULE_PARM_DESC(radios, "Number of simulated radios"); 33MODULE_PARM_DESC(radios, "Number of simulated radios");
34 34
35static bool fake_hw_scan;
36module_param(fake_hw_scan, bool, 0444);
37MODULE_PARM_DESC(fake_hw_scan, "Install fake (no-op) hw-scan handler");
38
35/** 39/**
36 * enum hwsim_regtest - the type of regulatory tests we offer 40 * enum hwsim_regtest - the type of regulatory tests we offer
37 * 41 *
@@ -281,6 +285,8 @@ struct mac80211_hwsim_data {
281 struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)]; 285 struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
282 struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)]; 286 struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
283 287
288 struct mac_address addresses[2];
289
284 struct ieee80211_channel *channel; 290 struct ieee80211_channel *channel;
285 unsigned long beacon_int; /* in jiffies unit */ 291 unsigned long beacon_int; /* in jiffies unit */
286 unsigned int rx_filter; 292 unsigned int rx_filter;
@@ -436,6 +442,38 @@ static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data,
436} 442}
437 443
438 444
445struct mac80211_hwsim_addr_match_data {
446 bool ret;
447 const u8 *addr;
448};
449
450static void mac80211_hwsim_addr_iter(void *data, u8 *mac,
451 struct ieee80211_vif *vif)
452{
453 struct mac80211_hwsim_addr_match_data *md = data;
454 if (memcmp(mac, md->addr, ETH_ALEN) == 0)
455 md->ret = true;
456}
457
458
459static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
460 const u8 *addr)
461{
462 struct mac80211_hwsim_addr_match_data md;
463
464 if (memcmp(addr, data->hw->wiphy->perm_addr, ETH_ALEN) == 0)
465 return true;
466
467 md.ret = false;
468 md.addr = addr;
469 ieee80211_iterate_active_interfaces_atomic(data->hw,
470 mac80211_hwsim_addr_iter,
471 &md);
472
473 return md.ret;
474}
475
476
439static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, 477static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
440 struct sk_buff *skb) 478 struct sk_buff *skb)
441{ 479{
@@ -488,8 +526,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
488 if (nskb == NULL) 526 if (nskb == NULL)
489 continue; 527 continue;
490 528
491 if (memcmp(hdr->addr1, data2->hw->wiphy->perm_addr, 529 if (mac80211_hwsim_addr_match(data2, hdr->addr1))
492 ETH_ALEN) == 0)
493 ack = true; 530 ack = true;
494 memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status)); 531 memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
495 ieee80211_rx_irqsafe(data2->hw, nskb); 532 ieee80211_rx_irqsafe(data2->hw, nskb);
@@ -553,24 +590,24 @@ static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
553 590
554 591
555static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw, 592static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
556 struct ieee80211_if_init_conf *conf) 593 struct ieee80211_vif *vif)
557{ 594{
558 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n", 595 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n",
559 wiphy_name(hw->wiphy), __func__, conf->type, 596 wiphy_name(hw->wiphy), __func__, vif->type,
560 conf->mac_addr); 597 vif->addr);
561 hwsim_set_magic(conf->vif); 598 hwsim_set_magic(vif);
562 return 0; 599 return 0;
563} 600}
564 601
565 602
566static void mac80211_hwsim_remove_interface( 603static void mac80211_hwsim_remove_interface(
567 struct ieee80211_hw *hw, struct ieee80211_if_init_conf *conf) 604 struct ieee80211_hw *hw, struct ieee80211_vif *vif)
568{ 605{
569 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n", 606 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n",
570 wiphy_name(hw->wiphy), __func__, conf->type, 607 wiphy_name(hw->wiphy), __func__, vif->type,
571 conf->mac_addr); 608 vif->addr);
572 hwsim_check_magic(conf->vif); 609 hwsim_check_magic(vif);
573 hwsim_clear_magic(conf->vif); 610 hwsim_clear_magic(vif);
574} 611}
575 612
576 613
@@ -618,12 +655,26 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
618{ 655{
619 struct mac80211_hwsim_data *data = hw->priv; 656 struct mac80211_hwsim_data *data = hw->priv;
620 struct ieee80211_conf *conf = &hw->conf; 657 struct ieee80211_conf *conf = &hw->conf;
621 658 static const char *chantypes[4] = {
622 printk(KERN_DEBUG "%s:%s (freq=%d idle=%d ps=%d)\n", 659 [NL80211_CHAN_NO_HT] = "noht",
660 [NL80211_CHAN_HT20] = "ht20",
661 [NL80211_CHAN_HT40MINUS] = "ht40-",
662 [NL80211_CHAN_HT40PLUS] = "ht40+",
663 };
664 static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
665 [IEEE80211_SMPS_AUTOMATIC] = "auto",
666 [IEEE80211_SMPS_OFF] = "off",
667 [IEEE80211_SMPS_STATIC] = "static",
668 [IEEE80211_SMPS_DYNAMIC] = "dynamic",
669 };
670
671 printk(KERN_DEBUG "%s:%s (freq=%d/%s idle=%d ps=%d smps=%s)\n",
623 wiphy_name(hw->wiphy), __func__, 672 wiphy_name(hw->wiphy), __func__,
624 conf->channel->center_freq, 673 conf->channel->center_freq,
674 chantypes[conf->channel_type],
625 !!(conf->flags & IEEE80211_CONF_IDLE), 675 !!(conf->flags & IEEE80211_CONF_IDLE),
626 !!(conf->flags & IEEE80211_CONF_PS)); 676 !!(conf->flags & IEEE80211_CONF_PS),
677 smps_modes[conf->smps_mode]);
627 678
628 data->idle = !!(conf->flags & IEEE80211_CONF_IDLE); 679 data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
629 680
@@ -720,23 +771,41 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
720 } 771 }
721} 772}
722 773
774static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw,
775 struct ieee80211_vif *vif,
776 struct ieee80211_sta *sta)
777{
778 hwsim_check_magic(vif);
779 hwsim_set_sta_magic(sta);
780
781 return 0;
782}
783
784static int mac80211_hwsim_sta_remove(struct ieee80211_hw *hw,
785 struct ieee80211_vif *vif,
786 struct ieee80211_sta *sta)
787{
788 hwsim_check_magic(vif);
789 hwsim_clear_sta_magic(sta);
790
791 return 0;
792}
793
723static void mac80211_hwsim_sta_notify(struct ieee80211_hw *hw, 794static void mac80211_hwsim_sta_notify(struct ieee80211_hw *hw,
724 struct ieee80211_vif *vif, 795 struct ieee80211_vif *vif,
725 enum sta_notify_cmd cmd, 796 enum sta_notify_cmd cmd,
726 struct ieee80211_sta *sta) 797 struct ieee80211_sta *sta)
727{ 798{
728 hwsim_check_magic(vif); 799 hwsim_check_magic(vif);
800
729 switch (cmd) { 801 switch (cmd) {
730 case STA_NOTIFY_ADD:
731 hwsim_set_sta_magic(sta);
732 break;
733 case STA_NOTIFY_REMOVE:
734 hwsim_clear_sta_magic(sta);
735 break;
736 case STA_NOTIFY_SLEEP: 802 case STA_NOTIFY_SLEEP:
737 case STA_NOTIFY_AWAKE: 803 case STA_NOTIFY_AWAKE:
738 /* TODO: make good use of these flags */ 804 /* TODO: make good use of these flags */
739 break; 805 break;
806 default:
807 WARN(1, "Invalid sta notify: %d\n", cmd);
808 break;
740 } 809 }
741} 810}
742 811
@@ -827,7 +896,77 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
827} 896}
828#endif 897#endif
829 898
830static const struct ieee80211_ops mac80211_hwsim_ops = 899static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
900 struct ieee80211_vif *vif,
901 enum ieee80211_ampdu_mlme_action action,
902 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
903{
904 switch (action) {
905 case IEEE80211_AMPDU_TX_START:
906 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
907 break;
908 case IEEE80211_AMPDU_TX_STOP:
909 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
910 break;
911 case IEEE80211_AMPDU_TX_OPERATIONAL:
912 break;
913 case IEEE80211_AMPDU_RX_START:
914 case IEEE80211_AMPDU_RX_STOP:
915 break;
916 default:
917 return -EOPNOTSUPP;
918 }
919
920 return 0;
921}
922
923static void mac80211_hwsim_flush(struct ieee80211_hw *hw, bool drop)
924{
925 /*
926 * In this special case, there's nothing we need to
927 * do because hwsim does transmission synchronously.
928 * In the future, when it does transmissions via
929 * userspace, we may need to do something.
930 */
931}
932
933struct hw_scan_done {
934 struct delayed_work w;
935 struct ieee80211_hw *hw;
936};
937
938static void hw_scan_done(struct work_struct *work)
939{
940 struct hw_scan_done *hsd =
941 container_of(work, struct hw_scan_done, w.work);
942
943 ieee80211_scan_completed(hsd->hw, false);
944 kfree(hsd);
945}
946
947static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
948 struct cfg80211_scan_request *req)
949{
950 struct hw_scan_done *hsd = kzalloc(sizeof(*hsd), GFP_KERNEL);
951 int i;
952
953 if (!hsd)
954 return -ENOMEM;
955
956 hsd->hw = hw;
957 INIT_DELAYED_WORK(&hsd->w, hw_scan_done);
958
959 printk(KERN_DEBUG "hwsim scan request\n");
960 for (i = 0; i < req->n_channels; i++)
961 printk(KERN_DEBUG "hwsim scan freq %d\n",
962 req->channels[i]->center_freq);
963
964 ieee80211_queue_delayed_work(hw, &hsd->w, 2 * HZ);
965
966 return 0;
967}
968
969static struct ieee80211_ops mac80211_hwsim_ops =
831{ 970{
832 .tx = mac80211_hwsim_tx, 971 .tx = mac80211_hwsim_tx,
833 .start = mac80211_hwsim_start, 972 .start = mac80211_hwsim_start,
@@ -837,10 +976,14 @@ static const struct ieee80211_ops mac80211_hwsim_ops =
837 .config = mac80211_hwsim_config, 976 .config = mac80211_hwsim_config,
838 .configure_filter = mac80211_hwsim_configure_filter, 977 .configure_filter = mac80211_hwsim_configure_filter,
839 .bss_info_changed = mac80211_hwsim_bss_info_changed, 978 .bss_info_changed = mac80211_hwsim_bss_info_changed,
979 .sta_add = mac80211_hwsim_sta_add,
980 .sta_remove = mac80211_hwsim_sta_remove,
840 .sta_notify = mac80211_hwsim_sta_notify, 981 .sta_notify = mac80211_hwsim_sta_notify,
841 .set_tim = mac80211_hwsim_set_tim, 982 .set_tim = mac80211_hwsim_set_tim,
842 .conf_tx = mac80211_hwsim_conf_tx, 983 .conf_tx = mac80211_hwsim_conf_tx,
843 CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd) 984 CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd)
985 .ampdu_action = mac80211_hwsim_ampdu_action,
986 .flush = mac80211_hwsim_flush,
844}; 987};
845 988
846 989
@@ -1035,6 +1178,9 @@ static int __init init_mac80211_hwsim(void)
1035 if (radios < 1 || radios > 100) 1178 if (radios < 1 || radios > 100)
1036 return -EINVAL; 1179 return -EINVAL;
1037 1180
1181 if (fake_hw_scan)
1182 mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
1183
1038 spin_lock_init(&hwsim_radio_lock); 1184 spin_lock_init(&hwsim_radio_lock);
1039 INIT_LIST_HEAD(&hwsim_radios); 1185 INIT_LIST_HEAD(&hwsim_radios);
1040 1186
@@ -1072,7 +1218,11 @@ static int __init init_mac80211_hwsim(void)
1072 SET_IEEE80211_DEV(hw, data->dev); 1218 SET_IEEE80211_DEV(hw, data->dev);
1073 addr[3] = i >> 8; 1219 addr[3] = i >> 8;
1074 addr[4] = i; 1220 addr[4] = i;
1075 SET_IEEE80211_PERM_ADDR(hw, addr); 1221 memcpy(data->addresses[0].addr, addr, ETH_ALEN);
1222 memcpy(data->addresses[1].addr, addr, ETH_ALEN);
1223 data->addresses[1].addr[0] |= 0x40;
1224 hw->wiphy->n_addresses = 2;
1225 hw->wiphy->addresses = data->addresses;
1076 1226
1077 hw->channel_change_time = 1; 1227 hw->channel_change_time = 1;
1078 hw->queues = 4; 1228 hw->queues = 4;
@@ -1082,7 +1232,9 @@ static int __init init_mac80211_hwsim(void)
1082 BIT(NL80211_IFTYPE_MESH_POINT); 1232 BIT(NL80211_IFTYPE_MESH_POINT);
1083 1233
1084 hw->flags = IEEE80211_HW_MFP_CAPABLE | 1234 hw->flags = IEEE80211_HW_MFP_CAPABLE |
1085 IEEE80211_HW_SIGNAL_DBM; 1235 IEEE80211_HW_SIGNAL_DBM |
1236 IEEE80211_HW_SUPPORTS_STATIC_SMPS |
1237 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
1086 1238
1087 /* ask mac80211 to reserve space for magic */ 1239 /* ask mac80211 to reserve space for magic */
1088 hw->vif_data_size = sizeof(struct hwsim_vif_priv); 1240 hw->vif_data_size = sizeof(struct hwsim_vif_priv);
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 59f92105b0c2..ac65e13eb0de 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -2,7 +2,7 @@
2 * drivers/net/wireless/mwl8k.c 2 * drivers/net/wireless/mwl8k.c
3 * Driver for Marvell TOPDOG 802.11 Wireless cards 3 * Driver for Marvell TOPDOG 802.11 Wireless cards
4 * 4 *
5 * Copyright (C) 2008-2009 Marvell Semiconductor Inc. 5 * Copyright (C) 2008, 2009, 2010 Marvell Semiconductor Inc.
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -26,7 +26,7 @@
26 26
27#define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver" 27#define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver"
28#define MWL8K_NAME KBUILD_MODNAME 28#define MWL8K_NAME KBUILD_MODNAME
29#define MWL8K_VERSION "0.10" 29#define MWL8K_VERSION "0.12"
30 30
31/* Register definitions */ 31/* Register definitions */
32#define MWL8K_HIU_GEN_PTR 0x00000c10 32#define MWL8K_HIU_GEN_PTR 0x00000c10
@@ -92,8 +92,7 @@ struct mwl8k_device_info {
92 char *part_name; 92 char *part_name;
93 char *helper_image; 93 char *helper_image;
94 char *fw_image; 94 char *fw_image;
95 struct rxd_ops *rxd_ops; 95 struct rxd_ops *ap_rxd_ops;
96 u16 modes;
97}; 96};
98 97
99struct mwl8k_rx_queue { 98struct mwl8k_rx_queue {
@@ -120,34 +119,36 @@ struct mwl8k_tx_queue {
120 /* sw appends here */ 119 /* sw appends here */
121 int tail; 120 int tail;
122 121
123 struct ieee80211_tx_queue_stats stats; 122 unsigned int len;
124 struct mwl8k_tx_desc *txd; 123 struct mwl8k_tx_desc *txd;
125 dma_addr_t txd_dma; 124 dma_addr_t txd_dma;
126 struct sk_buff **skb; 125 struct sk_buff **skb;
127}; 126};
128 127
129/* Pointers to the firmware data and meta information about it. */ 128struct mwl8k_priv {
130struct mwl8k_firmware { 129 struct ieee80211_hw *hw;
131 /* Boot helper code */ 130 struct pci_dev *pdev;
132 struct firmware *helper;
133 131
134 /* Microcode */ 132 struct mwl8k_device_info *device_info;
135 struct firmware *ucode;
136};
137 133
138struct mwl8k_priv {
139 void __iomem *sram; 134 void __iomem *sram;
140 void __iomem *regs; 135 void __iomem *regs;
141 struct ieee80211_hw *hw;
142 136
143 struct pci_dev *pdev; 137 /* firmware */
138 struct firmware *fw_helper;
139 struct firmware *fw_ucode;
144 140
145 struct mwl8k_device_info *device_info; 141 /* hardware/firmware parameters */
146 bool ap_fw; 142 bool ap_fw;
147 struct rxd_ops *rxd_ops; 143 struct rxd_ops *rxd_ops;
148 144 struct ieee80211_supported_band band_24;
149 /* firmware files and meta data */ 145 struct ieee80211_channel channels_24[14];
150 struct mwl8k_firmware fw; 146 struct ieee80211_rate rates_24[14];
147 struct ieee80211_supported_band band_50;
148 struct ieee80211_channel channels_50[4];
149 struct ieee80211_rate rates_50[9];
150 u32 ap_macids_supported;
151 u32 sta_macids_supported;
151 152
152 /* firmware access */ 153 /* firmware access */
153 struct mutex fw_mutex; 154 struct mutex fw_mutex;
@@ -161,9 +162,9 @@ struct mwl8k_priv {
161 /* TX quiesce completion, protected by fw_mutex and tx_lock */ 162 /* TX quiesce completion, protected by fw_mutex and tx_lock */
162 struct completion *tx_wait; 163 struct completion *tx_wait;
163 164
164 struct ieee80211_vif *vif; 165 /* List of interfaces. */
165 166 u32 macids_used;
166 struct ieee80211_channel *current_channel; 167 struct list_head vif_list;
167 168
168 /* power management status cookie from firmware */ 169 /* power management status cookie from firmware */
169 u32 *cookie; 170 u32 *cookie;
@@ -182,11 +183,6 @@ struct mwl8k_priv {
182 struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES]; 183 struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES];
183 struct mwl8k_tx_queue txq[MWL8K_TX_QUEUES]; 184 struct mwl8k_tx_queue txq[MWL8K_TX_QUEUES];
184 185
185 /* PHY parameters */
186 struct ieee80211_supported_band band;
187 struct ieee80211_channel channels[14];
188 struct ieee80211_rate rates[14];
189
190 bool radio_on; 186 bool radio_on;
191 bool radio_short_preamble; 187 bool radio_short_preamble;
192 bool sniffer_enabled; 188 bool sniffer_enabled;
@@ -205,32 +201,33 @@ struct mwl8k_priv {
205 */ 201 */
206 struct work_struct finalize_join_worker; 202 struct work_struct finalize_join_worker;
207 203
208 /* Tasklet to reclaim TX descriptors and buffers after tx */ 204 /* Tasklet to perform TX reclaim. */
209 struct tasklet_struct tx_reclaim_task; 205 struct tasklet_struct poll_tx_task;
206
207 /* Tasklet to perform RX. */
208 struct tasklet_struct poll_rx_task;
210}; 209};
211 210
212/* Per interface specific private data */ 211/* Per interface specific private data */
213struct mwl8k_vif { 212struct mwl8k_vif {
214 /* backpointer to parent config block */ 213 struct list_head list;
215 struct mwl8k_priv *priv; 214 struct ieee80211_vif *vif;
216
217 /* BSS config of AP or IBSS from mac80211*/
218 struct ieee80211_bss_conf bss_info;
219
220 /* BSSID of AP or IBSS */
221 u8 bssid[ETH_ALEN];
222 u8 mac_addr[ETH_ALEN];
223 215
224 /* Index into station database.Returned by update_sta_db call */ 216 /* Firmware macid for this vif. */
225 u8 peer_id; 217 int macid;
226 218
227 /* Non AMPDU sequence number assigned by driver */ 219 /* Non AMPDU sequence number assigned by driver. */
228 u16 seqno; 220 u16 seqno;
229}; 221};
230
231#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv)) 222#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))
232 223
233static const struct ieee80211_channel mwl8k_channels[] = { 224struct mwl8k_sta {
225 /* Index into station database. Returned by UPDATE_STADB. */
226 u8 peer_id;
227};
228#define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))
229
230static const struct ieee80211_channel mwl8k_channels_24[] = {
234 { .center_freq = 2412, .hw_value = 1, }, 231 { .center_freq = 2412, .hw_value = 1, },
235 { .center_freq = 2417, .hw_value = 2, }, 232 { .center_freq = 2417, .hw_value = 2, },
236 { .center_freq = 2422, .hw_value = 3, }, 233 { .center_freq = 2422, .hw_value = 3, },
@@ -242,9 +239,12 @@ static const struct ieee80211_channel mwl8k_channels[] = {
242 { .center_freq = 2452, .hw_value = 9, }, 239 { .center_freq = 2452, .hw_value = 9, },
243 { .center_freq = 2457, .hw_value = 10, }, 240 { .center_freq = 2457, .hw_value = 10, },
244 { .center_freq = 2462, .hw_value = 11, }, 241 { .center_freq = 2462, .hw_value = 11, },
242 { .center_freq = 2467, .hw_value = 12, },
243 { .center_freq = 2472, .hw_value = 13, },
244 { .center_freq = 2484, .hw_value = 14, },
245}; 245};
246 246
247static const struct ieee80211_rate mwl8k_rates[] = { 247static const struct ieee80211_rate mwl8k_rates_24[] = {
248 { .bitrate = 10, .hw_value = 2, }, 248 { .bitrate = 10, .hw_value = 2, },
249 { .bitrate = 20, .hw_value = 4, }, 249 { .bitrate = 20, .hw_value = 4, },
250 { .bitrate = 55, .hw_value = 11, }, 250 { .bitrate = 55, .hw_value = 11, },
@@ -261,8 +261,23 @@ static const struct ieee80211_rate mwl8k_rates[] = {
261 { .bitrate = 720, .hw_value = 144, }, 261 { .bitrate = 720, .hw_value = 144, },
262}; 262};
263 263
264static const u8 mwl8k_rateids[12] = { 264static const struct ieee80211_channel mwl8k_channels_50[] = {
265 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108, 265 { .center_freq = 5180, .hw_value = 36, },
266 { .center_freq = 5200, .hw_value = 40, },
267 { .center_freq = 5220, .hw_value = 44, },
268 { .center_freq = 5240, .hw_value = 48, },
269};
270
271static const struct ieee80211_rate mwl8k_rates_50[] = {
272 { .bitrate = 60, .hw_value = 12, },
273 { .bitrate = 90, .hw_value = 18, },
274 { .bitrate = 120, .hw_value = 24, },
275 { .bitrate = 180, .hw_value = 36, },
276 { .bitrate = 240, .hw_value = 48, },
277 { .bitrate = 360, .hw_value = 72, },
278 { .bitrate = 480, .hw_value = 96, },
279 { .bitrate = 540, .hw_value = 108, },
280 { .bitrate = 720, .hw_value = 144, },
266}; 281};
267 282
268/* Set or get info from Firmware */ 283/* Set or get info from Firmware */
@@ -278,6 +293,7 @@ static const u8 mwl8k_rateids[12] = {
278#define MWL8K_CMD_RADIO_CONTROL 0x001c 293#define MWL8K_CMD_RADIO_CONTROL 0x001c
279#define MWL8K_CMD_RF_TX_POWER 0x001e 294#define MWL8K_CMD_RF_TX_POWER 0x001e
280#define MWL8K_CMD_RF_ANTENNA 0x0020 295#define MWL8K_CMD_RF_ANTENNA 0x0020
296#define MWL8K_CMD_SET_BEACON 0x0100 /* per-vif */
281#define MWL8K_CMD_SET_PRE_SCAN 0x0107 297#define MWL8K_CMD_SET_PRE_SCAN 0x0107
282#define MWL8K_CMD_SET_POST_SCAN 0x0108 298#define MWL8K_CMD_SET_POST_SCAN 0x0108
283#define MWL8K_CMD_SET_RF_CHANNEL 0x010a 299#define MWL8K_CMD_SET_RF_CHANNEL 0x010a
@@ -291,8 +307,10 @@ static const u8 mwl8k_rateids[12] = {
291#define MWL8K_CMD_MIMO_CONFIG 0x0125 307#define MWL8K_CMD_MIMO_CONFIG 0x0125
292#define MWL8K_CMD_USE_FIXED_RATE 0x0126 308#define MWL8K_CMD_USE_FIXED_RATE 0x0126
293#define MWL8K_CMD_ENABLE_SNIFFER 0x0150 309#define MWL8K_CMD_ENABLE_SNIFFER 0x0150
294#define MWL8K_CMD_SET_MAC_ADDR 0x0202 310#define MWL8K_CMD_SET_MAC_ADDR 0x0202 /* per-vif */
295#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203 311#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203
312#define MWL8K_CMD_BSS_START 0x1100 /* per-vif */
313#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */
296#define MWL8K_CMD_UPDATE_STADB 0x1123 314#define MWL8K_CMD_UPDATE_STADB 0x1123
297 315
298static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize) 316static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
@@ -310,6 +328,7 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
310 MWL8K_CMDNAME(RADIO_CONTROL); 328 MWL8K_CMDNAME(RADIO_CONTROL);
311 MWL8K_CMDNAME(RF_TX_POWER); 329 MWL8K_CMDNAME(RF_TX_POWER);
312 MWL8K_CMDNAME(RF_ANTENNA); 330 MWL8K_CMDNAME(RF_ANTENNA);
331 MWL8K_CMDNAME(SET_BEACON);
313 MWL8K_CMDNAME(SET_PRE_SCAN); 332 MWL8K_CMDNAME(SET_PRE_SCAN);
314 MWL8K_CMDNAME(SET_POST_SCAN); 333 MWL8K_CMDNAME(SET_POST_SCAN);
315 MWL8K_CMDNAME(SET_RF_CHANNEL); 334 MWL8K_CMDNAME(SET_RF_CHANNEL);
@@ -325,6 +344,8 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
325 MWL8K_CMDNAME(ENABLE_SNIFFER); 344 MWL8K_CMDNAME(ENABLE_SNIFFER);
326 MWL8K_CMDNAME(SET_MAC_ADDR); 345 MWL8K_CMDNAME(SET_MAC_ADDR);
327 MWL8K_CMDNAME(SET_RATEADAPT_MODE); 346 MWL8K_CMDNAME(SET_RATEADAPT_MODE);
347 MWL8K_CMDNAME(BSS_START);
348 MWL8K_CMDNAME(SET_NEW_STN);
328 MWL8K_CMDNAME(UPDATE_STADB); 349 MWL8K_CMDNAME(UPDATE_STADB);
329 default: 350 default:
330 snprintf(buf, bufsize, "0x%x", cmd); 351 snprintf(buf, bufsize, "0x%x", cmd);
@@ -355,8 +376,8 @@ static void mwl8k_release_fw(struct firmware **fw)
355 376
356static void mwl8k_release_firmware(struct mwl8k_priv *priv) 377static void mwl8k_release_firmware(struct mwl8k_priv *priv)
357{ 378{
358 mwl8k_release_fw(&priv->fw.ucode); 379 mwl8k_release_fw(&priv->fw_ucode);
359 mwl8k_release_fw(&priv->fw.helper); 380 mwl8k_release_fw(&priv->fw_helper);
360} 381}
361 382
362/* Request fw image */ 383/* Request fw image */
@@ -377,7 +398,7 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv)
377 int rc; 398 int rc;
378 399
379 if (di->helper_image != NULL) { 400 if (di->helper_image != NULL) {
380 rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw.helper); 401 rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw_helper);
381 if (rc) { 402 if (rc) {
382 printk(KERN_ERR "%s: Error requesting helper " 403 printk(KERN_ERR "%s: Error requesting helper "
383 "firmware file %s\n", pci_name(priv->pdev), 404 "firmware file %s\n", pci_name(priv->pdev),
@@ -386,24 +407,22 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv)
386 } 407 }
387 } 408 }
388 409
389 rc = mwl8k_request_fw(priv, di->fw_image, &priv->fw.ucode); 410 rc = mwl8k_request_fw(priv, di->fw_image, &priv->fw_ucode);
390 if (rc) { 411 if (rc) {
391 printk(KERN_ERR "%s: Error requesting firmware file %s\n", 412 printk(KERN_ERR "%s: Error requesting firmware file %s\n",
392 pci_name(priv->pdev), di->fw_image); 413 pci_name(priv->pdev), di->fw_image);
393 mwl8k_release_fw(&priv->fw.helper); 414 mwl8k_release_fw(&priv->fw_helper);
394 return rc; 415 return rc;
395 } 416 }
396 417
397 return 0; 418 return 0;
398} 419}
399 420
400MODULE_FIRMWARE("mwl8k/helper_8687.fw");
401MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
402
403struct mwl8k_cmd_pkt { 421struct mwl8k_cmd_pkt {
404 __le16 code; 422 __le16 code;
405 __le16 length; 423 __le16 length;
406 __le16 seq_num; 424 __u8 seq_num;
425 __u8 macid;
407 __le16 result; 426 __le16 result;
408 char payload[0]; 427 char payload[0];
409} __attribute__((packed)); 428} __attribute__((packed));
@@ -461,6 +480,7 @@ static int mwl8k_load_fw_image(struct mwl8k_priv *priv,
461 480
462 cmd->code = cpu_to_le16(MWL8K_CMD_CODE_DNLD); 481 cmd->code = cpu_to_le16(MWL8K_CMD_CODE_DNLD);
463 cmd->seq_num = 0; 482 cmd->seq_num = 0;
483 cmd->macid = 0;
464 cmd->result = 0; 484 cmd->result = 0;
465 485
466 done = 0; 486 done = 0;
@@ -551,13 +571,12 @@ static int mwl8k_feed_fw_image(struct mwl8k_priv *priv,
551static int mwl8k_load_firmware(struct ieee80211_hw *hw) 571static int mwl8k_load_firmware(struct ieee80211_hw *hw)
552{ 572{
553 struct mwl8k_priv *priv = hw->priv; 573 struct mwl8k_priv *priv = hw->priv;
554 struct firmware *fw = priv->fw.ucode; 574 struct firmware *fw = priv->fw_ucode;
555 struct mwl8k_device_info *di = priv->device_info;
556 int rc; 575 int rc;
557 int loops; 576 int loops;
558 577
559 if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) { 578 if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) {
560 struct firmware *helper = priv->fw.helper; 579 struct firmware *helper = priv->fw_helper;
561 580
562 if (helper == NULL) { 581 if (helper == NULL) {
563 printk(KERN_ERR "%s: helper image needed but none " 582 printk(KERN_ERR "%s: helper image needed but none "
@@ -584,10 +603,7 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
584 return rc; 603 return rc;
585 } 604 }
586 605
587 if (di->modes & BIT(NL80211_IFTYPE_AP)) 606 iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
588 iowrite32(MWL8K_MODE_AP, priv->regs + MWL8K_HIU_GEN_PTR);
589 else
590 iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
591 607
592 loops = 500000; 608 loops = 500000;
593 do { 609 do {
@@ -610,91 +626,6 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
610} 626}
611 627
612 628
613/*
614 * Defines shared between transmission and reception.
615 */
616/* HT control fields for firmware */
617struct ewc_ht_info {
618 __le16 control1;
619 __le16 control2;
620 __le16 control3;
621} __attribute__((packed));
622
623/* Firmware Station database operations */
624#define MWL8K_STA_DB_ADD_ENTRY 0
625#define MWL8K_STA_DB_MODIFY_ENTRY 1
626#define MWL8K_STA_DB_DEL_ENTRY 2
627#define MWL8K_STA_DB_FLUSH 3
628
629/* Peer Entry flags - used to define the type of the peer node */
630#define MWL8K_PEER_TYPE_ACCESSPOINT 2
631
632struct peer_capability_info {
633 /* Peer type - AP vs. STA. */
634 __u8 peer_type;
635
636 /* Basic 802.11 capabilities from assoc resp. */
637 __le16 basic_caps;
638
639 /* Set if peer supports 802.11n high throughput (HT). */
640 __u8 ht_support;
641
642 /* Valid if HT is supported. */
643 __le16 ht_caps;
644 __u8 extended_ht_caps;
645 struct ewc_ht_info ewc_info;
646
647 /* Legacy rate table. Intersection of our rates and peer rates. */
648 __u8 legacy_rates[12];
649
650 /* HT rate table. Intersection of our rates and peer rates. */
651 __u8 ht_rates[16];
652 __u8 pad[16];
653
654 /* If set, interoperability mode, no proprietary extensions. */
655 __u8 interop;
656 __u8 pad2;
657 __u8 station_id;
658 __le16 amsdu_enabled;
659} __attribute__((packed));
660
661/* Inline functions to manipulate QoS field in data descriptor. */
662static inline u16 mwl8k_qos_setbit_eosp(u16 qos)
663{
664 u16 val_mask = 1 << 4;
665
666 /* End of Service Period Bit 4 */
667 return qos | val_mask;
668}
669
670static inline u16 mwl8k_qos_setbit_ack(u16 qos, u8 ack_policy)
671{
672 u16 val_mask = 0x3;
673 u8 shift = 5;
674 u16 qos_mask = ~(val_mask << shift);
675
676 /* Ack Policy Bit 5-6 */
677 return (qos & qos_mask) | ((ack_policy & val_mask) << shift);
678}
679
680static inline u16 mwl8k_qos_setbit_amsdu(u16 qos)
681{
682 u16 val_mask = 1 << 7;
683
684 /* AMSDU present Bit 7 */
685 return qos | val_mask;
686}
687
688static inline u16 mwl8k_qos_setbit_qlen(u16 qos, u8 len)
689{
690 u16 val_mask = 0xff;
691 u8 shift = 8;
692 u16 qos_mask = ~(val_mask << shift);
693
694 /* Queue Length Bits 8-15 */
695 return (qos & qos_mask) | ((len & val_mask) << shift);
696}
697
698/* DMA header used by firmware and hardware. */ 629/* DMA header used by firmware and hardware. */
699struct mwl8k_dma_data { 630struct mwl8k_dma_data {
700 __le16 fwlen; 631 __le16 fwlen;
@@ -761,9 +692,9 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
761 692
762 693
763/* 694/*
764 * Packet reception for 88w8366. 695 * Packet reception for 88w8366 AP firmware.
765 */ 696 */
766struct mwl8k_rxd_8366 { 697struct mwl8k_rxd_8366_ap {
767 __le16 pkt_len; 698 __le16 pkt_len;
768 __u8 sq2; 699 __u8 sq2;
769 __u8 rate; 700 __u8 rate;
@@ -781,23 +712,23 @@ struct mwl8k_rxd_8366 {
781 __u8 rx_ctrl; 712 __u8 rx_ctrl;
782} __attribute__((packed)); 713} __attribute__((packed));
783 714
784#define MWL8K_8366_RATE_INFO_MCS_FORMAT 0x80 715#define MWL8K_8366_AP_RATE_INFO_MCS_FORMAT 0x80
785#define MWL8K_8366_RATE_INFO_40MHZ 0x40 716#define MWL8K_8366_AP_RATE_INFO_40MHZ 0x40
786#define MWL8K_8366_RATE_INFO_RATEID(x) ((x) & 0x3f) 717#define MWL8K_8366_AP_RATE_INFO_RATEID(x) ((x) & 0x3f)
787 718
788#define MWL8K_8366_RX_CTRL_OWNED_BY_HOST 0x80 719#define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80
789 720
790static void mwl8k_rxd_8366_init(void *_rxd, dma_addr_t next_dma_addr) 721static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr)
791{ 722{
792 struct mwl8k_rxd_8366 *rxd = _rxd; 723 struct mwl8k_rxd_8366_ap *rxd = _rxd;
793 724
794 rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr); 725 rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
795 rxd->rx_ctrl = MWL8K_8366_RX_CTRL_OWNED_BY_HOST; 726 rxd->rx_ctrl = MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST;
796} 727}
797 728
798static void mwl8k_rxd_8366_refill(void *_rxd, dma_addr_t addr, int len) 729static void mwl8k_rxd_8366_ap_refill(void *_rxd, dma_addr_t addr, int len)
799{ 730{
800 struct mwl8k_rxd_8366 *rxd = _rxd; 731 struct mwl8k_rxd_8366_ap *rxd = _rxd;
801 732
802 rxd->pkt_len = cpu_to_le16(len); 733 rxd->pkt_len = cpu_to_le16(len);
803 rxd->pkt_phys_addr = cpu_to_le32(addr); 734 rxd->pkt_phys_addr = cpu_to_le32(addr);
@@ -806,12 +737,12 @@ static void mwl8k_rxd_8366_refill(void *_rxd, dma_addr_t addr, int len)
806} 737}
807 738
808static int 739static int
809mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status, 740mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
810 __le16 *qos) 741 __le16 *qos)
811{ 742{
812 struct mwl8k_rxd_8366 *rxd = _rxd; 743 struct mwl8k_rxd_8366_ap *rxd = _rxd;
813 744
814 if (!(rxd->rx_ctrl & MWL8K_8366_RX_CTRL_OWNED_BY_HOST)) 745 if (!(rxd->rx_ctrl & MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST))
815 return -1; 746 return -1;
816 rmb(); 747 rmb();
817 748
@@ -820,23 +751,29 @@ mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status,
820 status->signal = -rxd->rssi; 751 status->signal = -rxd->rssi;
821 status->noise = -rxd->noise_floor; 752 status->noise = -rxd->noise_floor;
822 753
823 if (rxd->rate & MWL8K_8366_RATE_INFO_MCS_FORMAT) { 754 if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) {
824 status->flag |= RX_FLAG_HT; 755 status->flag |= RX_FLAG_HT;
825 if (rxd->rate & MWL8K_8366_RATE_INFO_40MHZ) 756 if (rxd->rate & MWL8K_8366_AP_RATE_INFO_40MHZ)
826 status->flag |= RX_FLAG_40MHZ; 757 status->flag |= RX_FLAG_40MHZ;
827 status->rate_idx = MWL8K_8366_RATE_INFO_RATEID(rxd->rate); 758 status->rate_idx = MWL8K_8366_AP_RATE_INFO_RATEID(rxd->rate);
828 } else { 759 } else {
829 int i; 760 int i;
830 761
831 for (i = 0; i < ARRAY_SIZE(mwl8k_rates); i++) { 762 for (i = 0; i < ARRAY_SIZE(mwl8k_rates_24); i++) {
832 if (mwl8k_rates[i].hw_value == rxd->rate) { 763 if (mwl8k_rates_24[i].hw_value == rxd->rate) {
833 status->rate_idx = i; 764 status->rate_idx = i;
834 break; 765 break;
835 } 766 }
836 } 767 }
837 } 768 }
838 769
839 status->band = IEEE80211_BAND_2GHZ; 770 if (rxd->channel > 14) {
771 status->band = IEEE80211_BAND_5GHZ;
772 if (!(status->flag & RX_FLAG_HT))
773 status->rate_idx -= 5;
774 } else {
775 status->band = IEEE80211_BAND_2GHZ;
776 }
840 status->freq = ieee80211_channel_to_frequency(rxd->channel); 777 status->freq = ieee80211_channel_to_frequency(rxd->channel);
841 778
842 *qos = rxd->qos_control; 779 *qos = rxd->qos_control;
@@ -844,17 +781,17 @@ mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status,
844 return le16_to_cpu(rxd->pkt_len); 781 return le16_to_cpu(rxd->pkt_len);
845} 782}
846 783
847static struct rxd_ops rxd_8366_ops = { 784static struct rxd_ops rxd_8366_ap_ops = {
848 .rxd_size = sizeof(struct mwl8k_rxd_8366), 785 .rxd_size = sizeof(struct mwl8k_rxd_8366_ap),
849 .rxd_init = mwl8k_rxd_8366_init, 786 .rxd_init = mwl8k_rxd_8366_ap_init,
850 .rxd_refill = mwl8k_rxd_8366_refill, 787 .rxd_refill = mwl8k_rxd_8366_ap_refill,
851 .rxd_process = mwl8k_rxd_8366_process, 788 .rxd_process = mwl8k_rxd_8366_ap_process,
852}; 789};
853 790
854/* 791/*
855 * Packet reception for 88w8687. 792 * Packet reception for STA firmware.
856 */ 793 */
857struct mwl8k_rxd_8687 { 794struct mwl8k_rxd_sta {
858 __le16 pkt_len; 795 __le16 pkt_len;
859 __u8 link_quality; 796 __u8 link_quality;
860 __u8 noise_level; 797 __u8 noise_level;
@@ -871,26 +808,26 @@ struct mwl8k_rxd_8687 {
871 __u8 pad2[2]; 808 __u8 pad2[2];
872} __attribute__((packed)); 809} __attribute__((packed));
873 810
874#define MWL8K_8687_RATE_INFO_SHORTPRE 0x8000 811#define MWL8K_STA_RATE_INFO_SHORTPRE 0x8000
875#define MWL8K_8687_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3) 812#define MWL8K_STA_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3)
876#define MWL8K_8687_RATE_INFO_RATEID(x) (((x) >> 3) & 0x3f) 813#define MWL8K_STA_RATE_INFO_RATEID(x) (((x) >> 3) & 0x3f)
877#define MWL8K_8687_RATE_INFO_40MHZ 0x0004 814#define MWL8K_STA_RATE_INFO_40MHZ 0x0004
878#define MWL8K_8687_RATE_INFO_SHORTGI 0x0002 815#define MWL8K_STA_RATE_INFO_SHORTGI 0x0002
879#define MWL8K_8687_RATE_INFO_MCS_FORMAT 0x0001 816#define MWL8K_STA_RATE_INFO_MCS_FORMAT 0x0001
880 817
881#define MWL8K_8687_RX_CTRL_OWNED_BY_HOST 0x02 818#define MWL8K_STA_RX_CTRL_OWNED_BY_HOST 0x02
882 819
883static void mwl8k_rxd_8687_init(void *_rxd, dma_addr_t next_dma_addr) 820static void mwl8k_rxd_sta_init(void *_rxd, dma_addr_t next_dma_addr)
884{ 821{
885 struct mwl8k_rxd_8687 *rxd = _rxd; 822 struct mwl8k_rxd_sta *rxd = _rxd;
886 823
887 rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr); 824 rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
888 rxd->rx_ctrl = MWL8K_8687_RX_CTRL_OWNED_BY_HOST; 825 rxd->rx_ctrl = MWL8K_STA_RX_CTRL_OWNED_BY_HOST;
889} 826}
890 827
891static void mwl8k_rxd_8687_refill(void *_rxd, dma_addr_t addr, int len) 828static void mwl8k_rxd_sta_refill(void *_rxd, dma_addr_t addr, int len)
892{ 829{
893 struct mwl8k_rxd_8687 *rxd = _rxd; 830 struct mwl8k_rxd_sta *rxd = _rxd;
894 831
895 rxd->pkt_len = cpu_to_le16(len); 832 rxd->pkt_len = cpu_to_le16(len);
896 rxd->pkt_phys_addr = cpu_to_le32(addr); 833 rxd->pkt_phys_addr = cpu_to_le32(addr);
@@ -899,13 +836,13 @@ static void mwl8k_rxd_8687_refill(void *_rxd, dma_addr_t addr, int len)
899} 836}
900 837
901static int 838static int
902mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status, 839mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
903 __le16 *qos) 840 __le16 *qos)
904{ 841{
905 struct mwl8k_rxd_8687 *rxd = _rxd; 842 struct mwl8k_rxd_sta *rxd = _rxd;
906 u16 rate_info; 843 u16 rate_info;
907 844
908 if (!(rxd->rx_ctrl & MWL8K_8687_RX_CTRL_OWNED_BY_HOST)) 845 if (!(rxd->rx_ctrl & MWL8K_STA_RX_CTRL_OWNED_BY_HOST))
909 return -1; 846 return -1;
910 rmb(); 847 rmb();
911 848
@@ -915,19 +852,25 @@ mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status,
915 852
916 status->signal = -rxd->rssi; 853 status->signal = -rxd->rssi;
917 status->noise = -rxd->noise_level; 854 status->noise = -rxd->noise_level;
918 status->antenna = MWL8K_8687_RATE_INFO_ANTSELECT(rate_info); 855 status->antenna = MWL8K_STA_RATE_INFO_ANTSELECT(rate_info);
919 status->rate_idx = MWL8K_8687_RATE_INFO_RATEID(rate_info); 856 status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info);
920 857
921 if (rate_info & MWL8K_8687_RATE_INFO_SHORTPRE) 858 if (rate_info & MWL8K_STA_RATE_INFO_SHORTPRE)
922 status->flag |= RX_FLAG_SHORTPRE; 859 status->flag |= RX_FLAG_SHORTPRE;
923 if (rate_info & MWL8K_8687_RATE_INFO_40MHZ) 860 if (rate_info & MWL8K_STA_RATE_INFO_40MHZ)
924 status->flag |= RX_FLAG_40MHZ; 861 status->flag |= RX_FLAG_40MHZ;
925 if (rate_info & MWL8K_8687_RATE_INFO_SHORTGI) 862 if (rate_info & MWL8K_STA_RATE_INFO_SHORTGI)
926 status->flag |= RX_FLAG_SHORT_GI; 863 status->flag |= RX_FLAG_SHORT_GI;
927 if (rate_info & MWL8K_8687_RATE_INFO_MCS_FORMAT) 864 if (rate_info & MWL8K_STA_RATE_INFO_MCS_FORMAT)
928 status->flag |= RX_FLAG_HT; 865 status->flag |= RX_FLAG_HT;
929 866
930 status->band = IEEE80211_BAND_2GHZ; 867 if (rxd->channel > 14) {
868 status->band = IEEE80211_BAND_5GHZ;
869 if (!(status->flag & RX_FLAG_HT))
870 status->rate_idx -= 5;
871 } else {
872 status->band = IEEE80211_BAND_2GHZ;
873 }
931 status->freq = ieee80211_channel_to_frequency(rxd->channel); 874 status->freq = ieee80211_channel_to_frequency(rxd->channel);
932 875
933 *qos = rxd->qos_control; 876 *qos = rxd->qos_control;
@@ -935,11 +878,11 @@ mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status,
935 return le16_to_cpu(rxd->pkt_len); 878 return le16_to_cpu(rxd->pkt_len);
936} 879}
937 880
938static struct rxd_ops rxd_8687_ops = { 881static struct rxd_ops rxd_sta_ops = {
939 .rxd_size = sizeof(struct mwl8k_rxd_8687), 882 .rxd_size = sizeof(struct mwl8k_rxd_sta),
940 .rxd_init = mwl8k_rxd_8687_init, 883 .rxd_init = mwl8k_rxd_sta_init,
941 .rxd_refill = mwl8k_rxd_8687_refill, 884 .rxd_refill = mwl8k_rxd_sta_refill,
942 .rxd_process = mwl8k_rxd_8687_process, 885 .rxd_process = mwl8k_rxd_sta_process,
943}; 886};
944 887
945 888
@@ -1153,16 +1096,18 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
1153 * Packet transmission. 1096 * Packet transmission.
1154 */ 1097 */
1155 1098
1156/* Transmit packet ACK policy */
1157#define MWL8K_TXD_ACK_POLICY_NORMAL 0
1158#define MWL8K_TXD_ACK_POLICY_BLOCKACK 3
1159
1160#define MWL8K_TXD_STATUS_OK 0x00000001 1099#define MWL8K_TXD_STATUS_OK 0x00000001
1161#define MWL8K_TXD_STATUS_OK_RETRY 0x00000002 1100#define MWL8K_TXD_STATUS_OK_RETRY 0x00000002
1162#define MWL8K_TXD_STATUS_OK_MORE_RETRY 0x00000004 1101#define MWL8K_TXD_STATUS_OK_MORE_RETRY 0x00000004
1163#define MWL8K_TXD_STATUS_MULTICAST_TX 0x00000008 1102#define MWL8K_TXD_STATUS_MULTICAST_TX 0x00000008
1164#define MWL8K_TXD_STATUS_FW_OWNED 0x80000000 1103#define MWL8K_TXD_STATUS_FW_OWNED 0x80000000
1165 1104
1105#define MWL8K_QOS_QLEN_UNSPEC 0xff00
1106#define MWL8K_QOS_ACK_POLICY_MASK 0x0060
1107#define MWL8K_QOS_ACK_POLICY_NORMAL 0x0000
1108#define MWL8K_QOS_ACK_POLICY_BLOCKACK 0x0060
1109#define MWL8K_QOS_EOSP 0x0010
1110
1166struct mwl8k_tx_desc { 1111struct mwl8k_tx_desc {
1167 __le32 status; 1112 __le32 status;
1168 __u8 data_rate; 1113 __u8 data_rate;
@@ -1187,8 +1132,7 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
1187 int size; 1132 int size;
1188 int i; 1133 int i;
1189 1134
1190 memset(&txq->stats, 0, sizeof(struct ieee80211_tx_queue_stats)); 1135 txq->len = 0;
1191 txq->stats.limit = MWL8K_TX_DESCS;
1192 txq->head = 0; 1136 txq->head = 0;
1193 txq->tail = 0; 1137 txq->tail = 0;
1194 1138
@@ -1264,7 +1208,7 @@ static void mwl8k_dump_tx_rings(struct ieee80211_hw *hw)
1264 printk(KERN_ERR "%s: txq[%d] len=%d head=%d tail=%d " 1208 printk(KERN_ERR "%s: txq[%d] len=%d head=%d tail=%d "
1265 "fw_owned=%d drv_owned=%d unused=%d\n", 1209 "fw_owned=%d drv_owned=%d unused=%d\n",
1266 wiphy_name(hw->wiphy), i, 1210 wiphy_name(hw->wiphy), i,
1267 txq->stats.len, txq->head, txq->tail, 1211 txq->len, txq->head, txq->tail,
1268 fw_owned, drv_owned, unused); 1212 fw_owned, drv_owned, unused);
1269 } 1213 }
1270} 1214}
@@ -1272,7 +1216,7 @@ static void mwl8k_dump_tx_rings(struct ieee80211_hw *hw)
1272/* 1216/*
1273 * Must be called with priv->fw_mutex held and tx queues stopped. 1217 * Must be called with priv->fw_mutex held and tx queues stopped.
1274 */ 1218 */
1275#define MWL8K_TX_WAIT_TIMEOUT_MS 1000 1219#define MWL8K_TX_WAIT_TIMEOUT_MS 5000
1276 1220
1277static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw) 1221static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
1278{ 1222{
@@ -1316,8 +1260,8 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
1316 } 1260 }
1317 1261
1318 if (priv->pending_tx_pkts < oldcount) { 1262 if (priv->pending_tx_pkts < oldcount) {
1319 printk(KERN_NOTICE "%s: timeout waiting for tx " 1263 printk(KERN_NOTICE "%s: waiting for tx rings "
1320 "rings to drain (%d -> %d pkts), retrying\n", 1264 "to drain (%d -> %d pkts)\n",
1321 wiphy_name(hw->wiphy), oldcount, 1265 wiphy_name(hw->wiphy), oldcount,
1322 priv->pending_tx_pkts); 1266 priv->pending_tx_pkts);
1323 retry = 1; 1267 retry = 1;
@@ -1342,13 +1286,15 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
1342 MWL8K_TXD_STATUS_OK_RETRY | \ 1286 MWL8K_TXD_STATUS_OK_RETRY | \
1343 MWL8K_TXD_STATUS_OK_MORE_RETRY)) 1287 MWL8K_TXD_STATUS_OK_MORE_RETRY))
1344 1288
1345static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force) 1289static int
1290mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
1346{ 1291{
1347 struct mwl8k_priv *priv = hw->priv; 1292 struct mwl8k_priv *priv = hw->priv;
1348 struct mwl8k_tx_queue *txq = priv->txq + index; 1293 struct mwl8k_tx_queue *txq = priv->txq + index;
1349 int wake = 0; 1294 int processed;
1350 1295
1351 while (txq->stats.len > 0) { 1296 processed = 0;
1297 while (txq->len > 0 && limit--) {
1352 int tx; 1298 int tx;
1353 struct mwl8k_tx_desc *tx_desc; 1299 struct mwl8k_tx_desc *tx_desc;
1354 unsigned long addr; 1300 unsigned long addr;
@@ -1370,8 +1316,8 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
1370 } 1316 }
1371 1317
1372 txq->head = (tx + 1) % MWL8K_TX_DESCS; 1318 txq->head = (tx + 1) % MWL8K_TX_DESCS;
1373 BUG_ON(txq->stats.len == 0); 1319 BUG_ON(txq->len == 0);
1374 txq->stats.len--; 1320 txq->len--;
1375 priv->pending_tx_pkts--; 1321 priv->pending_tx_pkts--;
1376 1322
1377 addr = le32_to_cpu(tx_desc->pkt_phys_addr); 1323 addr = le32_to_cpu(tx_desc->pkt_phys_addr);
@@ -1395,11 +1341,13 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
1395 1341
1396 ieee80211_tx_status_irqsafe(hw, skb); 1342 ieee80211_tx_status_irqsafe(hw, skb);
1397 1343
1398 wake = 1; 1344 processed++;
1399 } 1345 }
1400 1346
1401 if (wake && priv->radio_on && !mutex_is_locked(&priv->fw_mutex)) 1347 if (processed && priv->radio_on && !mutex_is_locked(&priv->fw_mutex))
1402 ieee80211_wake_queue(hw, index); 1348 ieee80211_wake_queue(hw, index);
1349
1350 return processed;
1403} 1351}
1404 1352
1405/* must be called only when the card's transmit is completely halted */ 1353/* must be called only when the card's transmit is completely halted */
@@ -1408,7 +1356,7 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
1408 struct mwl8k_priv *priv = hw->priv; 1356 struct mwl8k_priv *priv = hw->priv;
1409 struct mwl8k_tx_queue *txq = priv->txq + index; 1357 struct mwl8k_tx_queue *txq = priv->txq + index;
1410 1358
1411 mwl8k_txq_reclaim(hw, index, 1); 1359 mwl8k_txq_reclaim(hw, index, INT_MAX, 1);
1412 1360
1413 kfree(txq->skb); 1361 kfree(txq->skb);
1414 txq->skb = NULL; 1362 txq->skb = NULL;
@@ -1446,11 +1394,9 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1446 mwl8k_vif = MWL8K_VIF(tx_info->control.vif); 1394 mwl8k_vif = MWL8K_VIF(tx_info->control.vif);
1447 1395
1448 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 1396 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1449 u16 seqno = mwl8k_vif->seqno;
1450
1451 wh->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 1397 wh->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1452 wh->seq_ctrl |= cpu_to_le16(seqno << 4); 1398 wh->seq_ctrl |= cpu_to_le16(mwl8k_vif->seqno);
1453 mwl8k_vif->seqno = seqno++ % 4096; 1399 mwl8k_vif->seqno += 0x10;
1454 } 1400 }
1455 1401
1456 /* Setup firmware control bit fields for each frame type. */ 1402 /* Setup firmware control bit fields for each frame type. */
@@ -1459,24 +1405,17 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1459 if (ieee80211_is_mgmt(wh->frame_control) || 1405 if (ieee80211_is_mgmt(wh->frame_control) ||
1460 ieee80211_is_ctl(wh->frame_control)) { 1406 ieee80211_is_ctl(wh->frame_control)) {
1461 txdatarate = 0; 1407 txdatarate = 0;
1462 qos = mwl8k_qos_setbit_eosp(qos); 1408 qos |= MWL8K_QOS_QLEN_UNSPEC | MWL8K_QOS_EOSP;
1463 /* Set Queue size to unspecified */
1464 qos = mwl8k_qos_setbit_qlen(qos, 0xff);
1465 } else if (ieee80211_is_data(wh->frame_control)) { 1409 } else if (ieee80211_is_data(wh->frame_control)) {
1466 txdatarate = 1; 1410 txdatarate = 1;
1467 if (is_multicast_ether_addr(wh->addr1)) 1411 if (is_multicast_ether_addr(wh->addr1))
1468 txstatus |= MWL8K_TXD_STATUS_MULTICAST_TX; 1412 txstatus |= MWL8K_TXD_STATUS_MULTICAST_TX;
1469 1413
1470 /* Send pkt in an aggregate if AMPDU frame. */ 1414 qos &= ~MWL8K_QOS_ACK_POLICY_MASK;
1471 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) 1415 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
1472 qos = mwl8k_qos_setbit_ack(qos, 1416 qos |= MWL8K_QOS_ACK_POLICY_BLOCKACK;
1473 MWL8K_TXD_ACK_POLICY_BLOCKACK);
1474 else 1417 else
1475 qos = mwl8k_qos_setbit_ack(qos, 1418 qos |= MWL8K_QOS_ACK_POLICY_NORMAL;
1476 MWL8K_TXD_ACK_POLICY_NORMAL);
1477
1478 if (qos & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
1479 qos = mwl8k_qos_setbit_amsdu(qos);
1480 } 1419 }
1481 1420
1482 dma = pci_map_single(priv->pdev, skb->data, 1421 dma = pci_map_single(priv->pdev, skb->data,
@@ -1503,12 +1442,14 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1503 tx->pkt_phys_addr = cpu_to_le32(dma); 1442 tx->pkt_phys_addr = cpu_to_le32(dma);
1504 tx->pkt_len = cpu_to_le16(skb->len); 1443 tx->pkt_len = cpu_to_le16(skb->len);
1505 tx->rate_info = 0; 1444 tx->rate_info = 0;
1506 tx->peer_id = mwl8k_vif->peer_id; 1445 if (!priv->ap_fw && tx_info->control.sta != NULL)
1446 tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id;
1447 else
1448 tx->peer_id = 0;
1507 wmb(); 1449 wmb();
1508 tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus); 1450 tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus);
1509 1451
1510 txq->stats.count++; 1452 txq->len++;
1511 txq->stats.len++;
1512 priv->pending_tx_pkts++; 1453 priv->pending_tx_pkts++;
1513 1454
1514 txq->tail++; 1455 txq->tail++;
@@ -1656,6 +1597,56 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
1656 return rc; 1597 return rc;
1657} 1598}
1658 1599
1600static int mwl8k_post_pervif_cmd(struct ieee80211_hw *hw,
1601 struct ieee80211_vif *vif,
1602 struct mwl8k_cmd_pkt *cmd)
1603{
1604 if (vif != NULL)
1605 cmd->macid = MWL8K_VIF(vif)->macid;
1606 return mwl8k_post_cmd(hw, cmd);
1607}
1608
1609/*
1610 * Setup code shared between STA and AP firmware images.
1611 */
1612static void mwl8k_setup_2ghz_band(struct ieee80211_hw *hw)
1613{
1614 struct mwl8k_priv *priv = hw->priv;
1615
1616 BUILD_BUG_ON(sizeof(priv->channels_24) != sizeof(mwl8k_channels_24));
1617 memcpy(priv->channels_24, mwl8k_channels_24, sizeof(mwl8k_channels_24));
1618
1619 BUILD_BUG_ON(sizeof(priv->rates_24) != sizeof(mwl8k_rates_24));
1620 memcpy(priv->rates_24, mwl8k_rates_24, sizeof(mwl8k_rates_24));
1621
1622 priv->band_24.band = IEEE80211_BAND_2GHZ;
1623 priv->band_24.channels = priv->channels_24;
1624 priv->band_24.n_channels = ARRAY_SIZE(mwl8k_channels_24);
1625 priv->band_24.bitrates = priv->rates_24;
1626 priv->band_24.n_bitrates = ARRAY_SIZE(mwl8k_rates_24);
1627
1628 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band_24;
1629}
1630
1631static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw)
1632{
1633 struct mwl8k_priv *priv = hw->priv;
1634
1635 BUILD_BUG_ON(sizeof(priv->channels_50) != sizeof(mwl8k_channels_50));
1636 memcpy(priv->channels_50, mwl8k_channels_50, sizeof(mwl8k_channels_50));
1637
1638 BUILD_BUG_ON(sizeof(priv->rates_50) != sizeof(mwl8k_rates_50));
1639 memcpy(priv->rates_50, mwl8k_rates_50, sizeof(mwl8k_rates_50));
1640
1641 priv->band_50.band = IEEE80211_BAND_5GHZ;
1642 priv->band_50.channels = priv->channels_50;
1643 priv->band_50.n_channels = ARRAY_SIZE(mwl8k_channels_50);
1644 priv->band_50.bitrates = priv->rates_50;
1645 priv->band_50.n_bitrates = ARRAY_SIZE(mwl8k_rates_50);
1646
1647 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->band_50;
1648}
1649
1659/* 1650/*
1660 * CMD_GET_HW_SPEC (STA version). 1651 * CMD_GET_HW_SPEC (STA version).
1661 */ 1652 */
@@ -1678,6 +1669,89 @@ struct mwl8k_cmd_get_hw_spec_sta {
1678 __le32 total_rxd; 1669 __le32 total_rxd;
1679} __attribute__((packed)); 1670} __attribute__((packed));
1680 1671
1672#define MWL8K_CAP_MAX_AMSDU 0x20000000
1673#define MWL8K_CAP_GREENFIELD 0x08000000
1674#define MWL8K_CAP_AMPDU 0x04000000
1675#define MWL8K_CAP_RX_STBC 0x01000000
1676#define MWL8K_CAP_TX_STBC 0x00800000
1677#define MWL8K_CAP_SHORTGI_40MHZ 0x00400000
1678#define MWL8K_CAP_SHORTGI_20MHZ 0x00200000
1679#define MWL8K_CAP_RX_ANTENNA_MASK 0x000e0000
1680#define MWL8K_CAP_TX_ANTENNA_MASK 0x0001c000
1681#define MWL8K_CAP_DELAY_BA 0x00003000
1682#define MWL8K_CAP_MIMO 0x00000200
1683#define MWL8K_CAP_40MHZ 0x00000100
1684#define MWL8K_CAP_BAND_MASK 0x00000007
1685#define MWL8K_CAP_5GHZ 0x00000004
1686#define MWL8K_CAP_2GHZ4 0x00000001
1687
1688static void
1689mwl8k_set_ht_caps(struct ieee80211_hw *hw,
1690 struct ieee80211_supported_band *band, u32 cap)
1691{
1692 int rx_streams;
1693 int tx_streams;
1694
1695 band->ht_cap.ht_supported = 1;
1696
1697 if (cap & MWL8K_CAP_MAX_AMSDU)
1698 band->ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
1699 if (cap & MWL8K_CAP_GREENFIELD)
1700 band->ht_cap.cap |= IEEE80211_HT_CAP_GRN_FLD;
1701 if (cap & MWL8K_CAP_AMPDU) {
1702 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
1703 band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
1704 band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
1705 }
1706 if (cap & MWL8K_CAP_RX_STBC)
1707 band->ht_cap.cap |= IEEE80211_HT_CAP_RX_STBC;
1708 if (cap & MWL8K_CAP_TX_STBC)
1709 band->ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
1710 if (cap & MWL8K_CAP_SHORTGI_40MHZ)
1711 band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
1712 if (cap & MWL8K_CAP_SHORTGI_20MHZ)
1713 band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
1714 if (cap & MWL8K_CAP_DELAY_BA)
1715 band->ht_cap.cap |= IEEE80211_HT_CAP_DELAY_BA;
1716 if (cap & MWL8K_CAP_40MHZ)
1717 band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
1718
1719 rx_streams = hweight32(cap & MWL8K_CAP_RX_ANTENNA_MASK);
1720 tx_streams = hweight32(cap & MWL8K_CAP_TX_ANTENNA_MASK);
1721
1722 band->ht_cap.mcs.rx_mask[0] = 0xff;
1723 if (rx_streams >= 2)
1724 band->ht_cap.mcs.rx_mask[1] = 0xff;
1725 if (rx_streams >= 3)
1726 band->ht_cap.mcs.rx_mask[2] = 0xff;
1727 band->ht_cap.mcs.rx_mask[4] = 0x01;
1728 band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
1729
1730 if (rx_streams != tx_streams) {
1731 band->ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
1732 band->ht_cap.mcs.tx_params |= (tx_streams - 1) <<
1733 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1734 }
1735}
1736
1737static void
1738mwl8k_set_caps(struct ieee80211_hw *hw, u32 caps)
1739{
1740 struct mwl8k_priv *priv = hw->priv;
1741
1742 if ((caps & MWL8K_CAP_2GHZ4) || !(caps & MWL8K_CAP_BAND_MASK)) {
1743 mwl8k_setup_2ghz_band(hw);
1744 if (caps & MWL8K_CAP_MIMO)
1745 mwl8k_set_ht_caps(hw, &priv->band_24, caps);
1746 }
1747
1748 if (caps & MWL8K_CAP_5GHZ) {
1749 mwl8k_setup_5ghz_band(hw);
1750 if (caps & MWL8K_CAP_MIMO)
1751 mwl8k_set_ht_caps(hw, &priv->band_50, caps);
1752 }
1753}
1754
1681static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw) 1755static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
1682{ 1756{
1683 struct mwl8k_priv *priv = hw->priv; 1757 struct mwl8k_priv *priv = hw->priv;
@@ -1708,6 +1782,9 @@ static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
1708 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs); 1782 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
1709 priv->fw_rev = le32_to_cpu(cmd->fw_rev); 1783 priv->fw_rev = le32_to_cpu(cmd->fw_rev);
1710 priv->hw_rev = cmd->hw_rev; 1784 priv->hw_rev = cmd->hw_rev;
1785 mwl8k_set_caps(hw, le32_to_cpu(cmd->caps));
1786 priv->ap_macids_supported = 0x00000000;
1787 priv->sta_macids_supported = 0x00000001;
1711 } 1788 }
1712 1789
1713 kfree(cmd); 1790 kfree(cmd);
@@ -1761,6 +1838,9 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
1761 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs); 1838 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
1762 priv->fw_rev = le32_to_cpu(cmd->fw_rev); 1839 priv->fw_rev = le32_to_cpu(cmd->fw_rev);
1763 priv->hw_rev = cmd->hw_rev; 1840 priv->hw_rev = cmd->hw_rev;
1841 mwl8k_setup_2ghz_band(hw);
1842 priv->ap_macids_supported = 0x000000ff;
1843 priv->sta_macids_supported = 0x00000000;
1764 1844
1765 off = le32_to_cpu(cmd->wcbbase0) & 0xffff; 1845 off = le32_to_cpu(cmd->wcbbase0) & 0xffff;
1766 iowrite32(cpu_to_le32(priv->txq[0].txd_dma), priv->sram + off); 1846 iowrite32(cpu_to_le32(priv->txq[0].txd_dma), priv->sram + off);
@@ -1806,7 +1886,9 @@ struct mwl8k_cmd_set_hw_spec {
1806 __le32 total_rxd; 1886 __le32 total_rxd;
1807} __attribute__((packed)); 1887} __attribute__((packed));
1808 1888
1809#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080 1889#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
1890#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020
1891#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010
1810 1892
1811static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw) 1893static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
1812{ 1894{
@@ -1827,7 +1909,9 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
1827 cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES); 1909 cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
1828 for (i = 0; i < MWL8K_TX_QUEUES; i++) 1910 for (i = 0; i < MWL8K_TX_QUEUES; i++)
1829 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma); 1911 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
1830 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT); 1912 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
1913 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
1914 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON);
1831 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS); 1915 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
1832 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS); 1916 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
1833 1917
@@ -1897,9 +1981,9 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
1897} 1981}
1898 1982
1899/* 1983/*
1900 * CMD_802_11_GET_STAT. 1984 * CMD_GET_STAT.
1901 */ 1985 */
1902struct mwl8k_cmd_802_11_get_stat { 1986struct mwl8k_cmd_get_stat {
1903 struct mwl8k_cmd_pkt header; 1987 struct mwl8k_cmd_pkt header;
1904 __le32 stats[64]; 1988 __le32 stats[64];
1905} __attribute__((packed)); 1989} __attribute__((packed));
@@ -1909,10 +1993,10 @@ struct mwl8k_cmd_802_11_get_stat {
1909#define MWL8K_STAT_FCS_ERROR 24 1993#define MWL8K_STAT_FCS_ERROR 24
1910#define MWL8K_STAT_RTS_SUCCESS 11 1994#define MWL8K_STAT_RTS_SUCCESS 11
1911 1995
1912static int mwl8k_cmd_802_11_get_stat(struct ieee80211_hw *hw, 1996static int mwl8k_cmd_get_stat(struct ieee80211_hw *hw,
1913 struct ieee80211_low_level_stats *stats) 1997 struct ieee80211_low_level_stats *stats)
1914{ 1998{
1915 struct mwl8k_cmd_802_11_get_stat *cmd; 1999 struct mwl8k_cmd_get_stat *cmd;
1916 int rc; 2000 int rc;
1917 2001
1918 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2002 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -1939,9 +2023,9 @@ static int mwl8k_cmd_802_11_get_stat(struct ieee80211_hw *hw,
1939} 2023}
1940 2024
1941/* 2025/*
1942 * CMD_802_11_RADIO_CONTROL. 2026 * CMD_RADIO_CONTROL.
1943 */ 2027 */
1944struct mwl8k_cmd_802_11_radio_control { 2028struct mwl8k_cmd_radio_control {
1945 struct mwl8k_cmd_pkt header; 2029 struct mwl8k_cmd_pkt header;
1946 __le16 action; 2030 __le16 action;
1947 __le16 control; 2031 __le16 control;
@@ -1949,10 +2033,10 @@ struct mwl8k_cmd_802_11_radio_control {
1949} __attribute__((packed)); 2033} __attribute__((packed));
1950 2034
1951static int 2035static int
1952mwl8k_cmd_802_11_radio_control(struct ieee80211_hw *hw, bool enable, bool force) 2036mwl8k_cmd_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
1953{ 2037{
1954 struct mwl8k_priv *priv = hw->priv; 2038 struct mwl8k_priv *priv = hw->priv;
1955 struct mwl8k_cmd_802_11_radio_control *cmd; 2039 struct mwl8k_cmd_radio_control *cmd;
1956 int rc; 2040 int rc;
1957 2041
1958 if (enable == priv->radio_on && !force) 2042 if (enable == priv->radio_on && !force)
@@ -1977,36 +2061,32 @@ mwl8k_cmd_802_11_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
1977 return rc; 2061 return rc;
1978} 2062}
1979 2063
1980static int mwl8k_cmd_802_11_radio_disable(struct ieee80211_hw *hw) 2064static int mwl8k_cmd_radio_disable(struct ieee80211_hw *hw)
1981{ 2065{
1982 return mwl8k_cmd_802_11_radio_control(hw, 0, 0); 2066 return mwl8k_cmd_radio_control(hw, 0, 0);
1983} 2067}
1984 2068
1985static int mwl8k_cmd_802_11_radio_enable(struct ieee80211_hw *hw) 2069static int mwl8k_cmd_radio_enable(struct ieee80211_hw *hw)
1986{ 2070{
1987 return mwl8k_cmd_802_11_radio_control(hw, 1, 0); 2071 return mwl8k_cmd_radio_control(hw, 1, 0);
1988} 2072}
1989 2073
1990static int 2074static int
1991mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble) 2075mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble)
1992{ 2076{
1993 struct mwl8k_priv *priv; 2077 struct mwl8k_priv *priv = hw->priv;
1994
1995 if (hw == NULL || hw->priv == NULL)
1996 return -EINVAL;
1997 priv = hw->priv;
1998 2078
1999 priv->radio_short_preamble = short_preamble; 2079 priv->radio_short_preamble = short_preamble;
2000 2080
2001 return mwl8k_cmd_802_11_radio_control(hw, 1, 1); 2081 return mwl8k_cmd_radio_control(hw, 1, 1);
2002} 2082}
2003 2083
2004/* 2084/*
2005 * CMD_802_11_RF_TX_POWER. 2085 * CMD_RF_TX_POWER.
2006 */ 2086 */
2007#define MWL8K_TX_POWER_LEVEL_TOTAL 8 2087#define MWL8K_TX_POWER_LEVEL_TOTAL 8
2008 2088
2009struct mwl8k_cmd_802_11_rf_tx_power { 2089struct mwl8k_cmd_rf_tx_power {
2010 struct mwl8k_cmd_pkt header; 2090 struct mwl8k_cmd_pkt header;
2011 __le16 action; 2091 __le16 action;
2012 __le16 support_level; 2092 __le16 support_level;
@@ -2015,9 +2095,9 @@ struct mwl8k_cmd_802_11_rf_tx_power {
2015 __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL]; 2095 __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL];
2016} __attribute__((packed)); 2096} __attribute__((packed));
2017 2097
2018static int mwl8k_cmd_802_11_rf_tx_power(struct ieee80211_hw *hw, int dBm) 2098static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm)
2019{ 2099{
2020 struct mwl8k_cmd_802_11_rf_tx_power *cmd; 2100 struct mwl8k_cmd_rf_tx_power *cmd;
2021 int rc; 2101 int rc;
2022 2102
2023 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2103 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -2069,6 +2149,36 @@ mwl8k_cmd_rf_antenna(struct ieee80211_hw *hw, int antenna, int mask)
2069} 2149}
2070 2150
2071/* 2151/*
2152 * CMD_SET_BEACON.
2153 */
2154struct mwl8k_cmd_set_beacon {
2155 struct mwl8k_cmd_pkt header;
2156 __le16 beacon_len;
2157 __u8 beacon[0];
2158};
2159
2160static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw,
2161 struct ieee80211_vif *vif, u8 *beacon, int len)
2162{
2163 struct mwl8k_cmd_set_beacon *cmd;
2164 int rc;
2165
2166 cmd = kzalloc(sizeof(*cmd) + len, GFP_KERNEL);
2167 if (cmd == NULL)
2168 return -ENOMEM;
2169
2170 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_BEACON);
2171 cmd->header.length = cpu_to_le16(sizeof(*cmd) + len);
2172 cmd->beacon_len = cpu_to_le16(len);
2173 memcpy(cmd->beacon, beacon, len);
2174
2175 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
2176 kfree(cmd);
2177
2178 return rc;
2179}
2180
2181/*
2072 * CMD_SET_PRE_SCAN. 2182 * CMD_SET_PRE_SCAN.
2073 */ 2183 */
2074struct mwl8k_cmd_set_pre_scan { 2184struct mwl8k_cmd_set_pre_scan {
@@ -2103,7 +2213,7 @@ struct mwl8k_cmd_set_post_scan {
2103} __attribute__((packed)); 2213} __attribute__((packed));
2104 2214
2105static int 2215static int
2106mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, __u8 *mac) 2216mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac)
2107{ 2217{
2108 struct mwl8k_cmd_set_post_scan *cmd; 2218 struct mwl8k_cmd_set_post_scan *cmd;
2109 int rc; 2219 int rc;
@@ -2134,8 +2244,9 @@ struct mwl8k_cmd_set_rf_channel {
2134} __attribute__((packed)); 2244} __attribute__((packed));
2135 2245
2136static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw, 2246static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
2137 struct ieee80211_channel *channel) 2247 struct ieee80211_conf *conf)
2138{ 2248{
2249 struct ieee80211_channel *channel = conf->channel;
2139 struct mwl8k_cmd_set_rf_channel *cmd; 2250 struct mwl8k_cmd_set_rf_channel *cmd;
2140 int rc; 2251 int rc;
2141 2252
@@ -2147,10 +2258,19 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
2147 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2258 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2148 cmd->action = cpu_to_le16(MWL8K_CMD_SET); 2259 cmd->action = cpu_to_le16(MWL8K_CMD_SET);
2149 cmd->current_channel = channel->hw_value; 2260 cmd->current_channel = channel->hw_value;
2261
2150 if (channel->band == IEEE80211_BAND_2GHZ) 2262 if (channel->band == IEEE80211_BAND_2GHZ)
2151 cmd->channel_flags = cpu_to_le32(0x00000081); 2263 cmd->channel_flags |= cpu_to_le32(0x00000001);
2152 else 2264 else if (channel->band == IEEE80211_BAND_5GHZ)
2153 cmd->channel_flags = cpu_to_le32(0x00000000); 2265 cmd->channel_flags |= cpu_to_le32(0x00000004);
2266
2267 if (conf->channel_type == NL80211_CHAN_NO_HT ||
2268 conf->channel_type == NL80211_CHAN_HT20)
2269 cmd->channel_flags |= cpu_to_le32(0x00000080);
2270 else if (conf->channel_type == NL80211_CHAN_HT40MINUS)
2271 cmd->channel_flags |= cpu_to_le32(0x000001900);
2272 else if (conf->channel_type == NL80211_CHAN_HT40PLUS)
2273 cmd->channel_flags |= cpu_to_le32(0x000000900);
2154 2274
2155 rc = mwl8k_post_cmd(hw, &cmd->header); 2275 rc = mwl8k_post_cmd(hw, &cmd->header);
2156 kfree(cmd); 2276 kfree(cmd);
@@ -2159,85 +2279,75 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
2159} 2279}
2160 2280
2161/* 2281/*
2162 * CMD_SET_SLOT. 2282 * CMD_SET_AID.
2163 */ 2283 */
2164struct mwl8k_cmd_set_slot { 2284#define MWL8K_FRAME_PROT_DISABLED 0x00
2165 struct mwl8k_cmd_pkt header; 2285#define MWL8K_FRAME_PROT_11G 0x07
2166 __le16 action; 2286#define MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY 0x02
2167 __u8 short_slot; 2287#define MWL8K_FRAME_PROT_11N_HT_ALL 0x06
2168} __attribute__((packed));
2169
2170static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
2171{
2172 struct mwl8k_cmd_set_slot *cmd;
2173 int rc;
2174
2175 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2176 if (cmd == NULL)
2177 return -ENOMEM;
2178
2179 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_SLOT);
2180 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2181 cmd->action = cpu_to_le16(MWL8K_CMD_SET);
2182 cmd->short_slot = short_slot_time;
2183
2184 rc = mwl8k_post_cmd(hw, &cmd->header);
2185 kfree(cmd);
2186 2288
2187 return rc; 2289struct mwl8k_cmd_update_set_aid {
2188} 2290 struct mwl8k_cmd_pkt header;
2291 __le16 aid;
2189 2292
2190/* 2293 /* AP's MAC address (BSSID) */
2191 * CMD_MIMO_CONFIG. 2294 __u8 bssid[ETH_ALEN];
2192 */ 2295 __le16 protection_mode;
2193struct mwl8k_cmd_mimo_config { 2296 __u8 supp_rates[14];
2194 struct mwl8k_cmd_pkt header;
2195 __le32 action;
2196 __u8 rx_antenna_map;
2197 __u8 tx_antenna_map;
2198} __attribute__((packed)); 2297} __attribute__((packed));
2199 2298
2200static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx) 2299static void legacy_rate_mask_to_array(u8 *rates, u32 mask)
2201{ 2300{
2202 struct mwl8k_cmd_mimo_config *cmd; 2301 int i;
2203 int rc; 2302 int j;
2204
2205 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2206 if (cmd == NULL)
2207 return -ENOMEM;
2208
2209 cmd->header.code = cpu_to_le16(MWL8K_CMD_MIMO_CONFIG);
2210 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2211 cmd->action = cpu_to_le32((u32)MWL8K_CMD_SET);
2212 cmd->rx_antenna_map = rx;
2213 cmd->tx_antenna_map = tx;
2214 2303
2215 rc = mwl8k_post_cmd(hw, &cmd->header); 2304 /*
2216 kfree(cmd); 2305 * Clear nonstandard rates 4 and 13.
2306 */
2307 mask &= 0x1fef;
2217 2308
2218 return rc; 2309 for (i = 0, j = 0; i < 14; i++) {
2310 if (mask & (1 << i))
2311 rates[j++] = mwl8k_rates_24[i].hw_value;
2312 }
2219} 2313}
2220 2314
2221/* 2315static int
2222 * CMD_ENABLE_SNIFFER. 2316mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
2223 */ 2317 struct ieee80211_vif *vif, u32 legacy_rate_mask)
2224struct mwl8k_cmd_enable_sniffer {
2225 struct mwl8k_cmd_pkt header;
2226 __le32 action;
2227} __attribute__((packed));
2228
2229static int mwl8k_enable_sniffer(struct ieee80211_hw *hw, bool enable)
2230{ 2318{
2231 struct mwl8k_cmd_enable_sniffer *cmd; 2319 struct mwl8k_cmd_update_set_aid *cmd;
2320 u16 prot_mode;
2232 int rc; 2321 int rc;
2233 2322
2234 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2323 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2235 if (cmd == NULL) 2324 if (cmd == NULL)
2236 return -ENOMEM; 2325 return -ENOMEM;
2237 2326
2238 cmd->header.code = cpu_to_le16(MWL8K_CMD_ENABLE_SNIFFER); 2327 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID);
2239 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2328 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2240 cmd->action = cpu_to_le32(!!enable); 2329 cmd->aid = cpu_to_le16(vif->bss_conf.aid);
2330 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
2331
2332 if (vif->bss_conf.use_cts_prot) {
2333 prot_mode = MWL8K_FRAME_PROT_11G;
2334 } else {
2335 switch (vif->bss_conf.ht_operation_mode &
2336 IEEE80211_HT_OP_MODE_PROTECTION) {
2337 case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
2338 prot_mode = MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY;
2339 break;
2340 case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
2341 prot_mode = MWL8K_FRAME_PROT_11N_HT_ALL;
2342 break;
2343 default:
2344 prot_mode = MWL8K_FRAME_PROT_DISABLED;
2345 break;
2346 }
2347 }
2348 cmd->protection_mode = cpu_to_le16(prot_mode);
2349
2350 legacy_rate_mask_to_array(cmd->supp_rates, legacy_rate_mask);
2241 2351
2242 rc = mwl8k_post_cmd(hw, &cmd->header); 2352 rc = mwl8k_post_cmd(hw, &cmd->header);
2243 kfree(cmd); 2353 kfree(cmd);
@@ -2246,37 +2356,32 @@ static int mwl8k_enable_sniffer(struct ieee80211_hw *hw, bool enable)
2246} 2356}
2247 2357
2248/* 2358/*
2249 * CMD_SET_MAC_ADDR. 2359 * CMD_SET_RATE.
2250 */ 2360 */
2251struct mwl8k_cmd_set_mac_addr { 2361struct mwl8k_cmd_set_rate {
2252 struct mwl8k_cmd_pkt header; 2362 struct mwl8k_cmd_pkt header;
2253 union { 2363 __u8 legacy_rates[14];
2254 struct { 2364
2255 __le16 mac_type; 2365 /* Bitmap for supported MCS codes. */
2256 __u8 mac_addr[ETH_ALEN]; 2366 __u8 mcs_set[16];
2257 } mbss; 2367 __u8 reserved[16];
2258 __u8 mac_addr[ETH_ALEN];
2259 };
2260} __attribute__((packed)); 2368} __attribute__((packed));
2261 2369
2262static int mwl8k_set_mac_addr(struct ieee80211_hw *hw, u8 *mac) 2370static int
2371mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2372 u32 legacy_rate_mask, u8 *mcs_rates)
2263{ 2373{
2264 struct mwl8k_priv *priv = hw->priv; 2374 struct mwl8k_cmd_set_rate *cmd;
2265 struct mwl8k_cmd_set_mac_addr *cmd;
2266 int rc; 2375 int rc;
2267 2376
2268 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2377 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2269 if (cmd == NULL) 2378 if (cmd == NULL)
2270 return -ENOMEM; 2379 return -ENOMEM;
2271 2380
2272 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR); 2381 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE);
2273 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2382 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2274 if (priv->ap_fw) { 2383 legacy_rate_mask_to_array(cmd->legacy_rates, legacy_rate_mask);
2275 cmd->mbss.mac_type = 0; 2384 memcpy(cmd->mcs_set, mcs_rates, 16);
2276 memcpy(cmd->mbss.mac_addr, mac, ETH_ALEN);
2277 } else {
2278 memcpy(cmd->mac_addr, mac, ETH_ALEN);
2279 }
2280 2385
2281 rc = mwl8k_post_cmd(hw, &cmd->header); 2386 rc = mwl8k_post_cmd(hw, &cmd->header);
2282 kfree(cmd); 2387 kfree(cmd);
@@ -2284,29 +2389,40 @@ static int mwl8k_set_mac_addr(struct ieee80211_hw *hw, u8 *mac)
2284 return rc; 2389 return rc;
2285} 2390}
2286 2391
2287
2288/* 2392/*
2289 * CMD_SET_RATEADAPT_MODE. 2393 * CMD_FINALIZE_JOIN.
2290 */ 2394 */
2291struct mwl8k_cmd_set_rate_adapt_mode { 2395#define MWL8K_FJ_BEACON_MAXLEN 128
2396
2397struct mwl8k_cmd_finalize_join {
2292 struct mwl8k_cmd_pkt header; 2398 struct mwl8k_cmd_pkt header;
2293 __le16 action; 2399 __le32 sleep_interval; /* Number of beacon periods to sleep */
2294 __le16 mode; 2400 __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
2295} __attribute__((packed)); 2401} __attribute__((packed));
2296 2402
2297static int mwl8k_cmd_setrateadaptmode(struct ieee80211_hw *hw, __u16 mode) 2403static int mwl8k_cmd_finalize_join(struct ieee80211_hw *hw, void *frame,
2404 int framelen, int dtim)
2298{ 2405{
2299 struct mwl8k_cmd_set_rate_adapt_mode *cmd; 2406 struct mwl8k_cmd_finalize_join *cmd;
2407 struct ieee80211_mgmt *payload = frame;
2408 int payload_len;
2300 int rc; 2409 int rc;
2301 2410
2302 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2411 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2303 if (cmd == NULL) 2412 if (cmd == NULL)
2304 return -ENOMEM; 2413 return -ENOMEM;
2305 2414
2306 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATEADAPT_MODE); 2415 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_FINALIZE_JOIN);
2307 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2416 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2308 cmd->action = cpu_to_le16(MWL8K_CMD_SET); 2417 cmd->sleep_interval = cpu_to_le32(dtim ? dtim : 1);
2309 cmd->mode = cpu_to_le16(mode); 2418
2419 payload_len = framelen - ieee80211_hdrlen(payload->frame_control);
2420 if (payload_len < 0)
2421 payload_len = 0;
2422 else if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
2423 payload_len = MWL8K_FJ_BEACON_MAXLEN;
2424
2425 memcpy(cmd->beacon_data, &payload->u.beacon, payload_len);
2310 2426
2311 rc = mwl8k_post_cmd(hw, &cmd->header); 2427 rc = mwl8k_post_cmd(hw, &cmd->header);
2312 kfree(cmd); 2428 kfree(cmd);
@@ -2315,59 +2431,57 @@ static int mwl8k_cmd_setrateadaptmode(struct ieee80211_hw *hw, __u16 mode)
2315} 2431}
2316 2432
2317/* 2433/*
2318 * CMD_SET_WMM_MODE. 2434 * CMD_SET_RTS_THRESHOLD.
2319 */ 2435 */
2320struct mwl8k_cmd_set_wmm { 2436struct mwl8k_cmd_set_rts_threshold {
2321 struct mwl8k_cmd_pkt header; 2437 struct mwl8k_cmd_pkt header;
2322 __le16 action; 2438 __le16 action;
2439 __le16 threshold;
2323} __attribute__((packed)); 2440} __attribute__((packed));
2324 2441
2325static int mwl8k_set_wmm(struct ieee80211_hw *hw, bool enable) 2442static int
2443mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh)
2326{ 2444{
2327 struct mwl8k_priv *priv = hw->priv; 2445 struct mwl8k_cmd_set_rts_threshold *cmd;
2328 struct mwl8k_cmd_set_wmm *cmd;
2329 int rc; 2446 int rc;
2330 2447
2331 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2448 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2332 if (cmd == NULL) 2449 if (cmd == NULL)
2333 return -ENOMEM; 2450 return -ENOMEM;
2334 2451
2335 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_WMM_MODE); 2452 cmd->header.code = cpu_to_le16(MWL8K_CMD_RTS_THRESHOLD);
2336 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2453 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2337 cmd->action = cpu_to_le16(!!enable); 2454 cmd->action = cpu_to_le16(MWL8K_CMD_SET);
2455 cmd->threshold = cpu_to_le16(rts_thresh);
2338 2456
2339 rc = mwl8k_post_cmd(hw, &cmd->header); 2457 rc = mwl8k_post_cmd(hw, &cmd->header);
2340 kfree(cmd); 2458 kfree(cmd);
2341 2459
2342 if (!rc)
2343 priv->wmm_enabled = enable;
2344
2345 return rc; 2460 return rc;
2346} 2461}
2347 2462
2348/* 2463/*
2349 * CMD_SET_RTS_THRESHOLD. 2464 * CMD_SET_SLOT.
2350 */ 2465 */
2351struct mwl8k_cmd_rts_threshold { 2466struct mwl8k_cmd_set_slot {
2352 struct mwl8k_cmd_pkt header; 2467 struct mwl8k_cmd_pkt header;
2353 __le16 action; 2468 __le16 action;
2354 __le16 threshold; 2469 __u8 short_slot;
2355} __attribute__((packed)); 2470} __attribute__((packed));
2356 2471
2357static int mwl8k_rts_threshold(struct ieee80211_hw *hw, 2472static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
2358 u16 action, u16 threshold)
2359{ 2473{
2360 struct mwl8k_cmd_rts_threshold *cmd; 2474 struct mwl8k_cmd_set_slot *cmd;
2361 int rc; 2475 int rc;
2362 2476
2363 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2477 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2364 if (cmd == NULL) 2478 if (cmd == NULL)
2365 return -ENOMEM; 2479 return -ENOMEM;
2366 2480
2367 cmd->header.code = cpu_to_le16(MWL8K_CMD_RTS_THRESHOLD); 2481 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_SLOT);
2368 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2482 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2369 cmd->action = cpu_to_le16(action); 2483 cmd->action = cpu_to_le16(MWL8K_CMD_SET);
2370 cmd->threshold = cpu_to_le16(threshold); 2484 cmd->short_slot = short_slot_time;
2371 2485
2372 rc = mwl8k_post_cmd(hw, &cmd->header); 2486 rc = mwl8k_post_cmd(hw, &cmd->header);
2373 kfree(cmd); 2487 kfree(cmd);
@@ -2426,9 +2540,9 @@ struct mwl8k_cmd_set_edca_params {
2426 MWL8K_SET_EDCA_AIFS) 2540 MWL8K_SET_EDCA_AIFS)
2427 2541
2428static int 2542static int
2429mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum, 2543mwl8k_cmd_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
2430 __u16 cw_min, __u16 cw_max, 2544 __u16 cw_min, __u16 cw_max,
2431 __u8 aifs, __u16 txop) 2545 __u8 aifs, __u16 txop)
2432{ 2546{
2433 struct mwl8k_priv *priv = hw->priv; 2547 struct mwl8k_priv *priv = hw->priv;
2434 struct mwl8k_cmd_set_edca_params *cmd; 2548 struct mwl8k_cmd_set_edca_params *cmd;
@@ -2438,12 +2552,6 @@ mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
2438 if (cmd == NULL) 2552 if (cmd == NULL)
2439 return -ENOMEM; 2553 return -ENOMEM;
2440 2554
2441 /*
2442 * Queues 0 (BE) and 1 (BK) are swapped in hardware for
2443 * this call.
2444 */
2445 qnum ^= !(qnum >> 1);
2446
2447 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS); 2555 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS);
2448 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2556 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2449 cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL); 2557 cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL);
@@ -2467,170 +2575,259 @@ mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
2467} 2575}
2468 2576
2469/* 2577/*
2470 * CMD_FINALIZE_JOIN. 2578 * CMD_SET_WMM_MODE.
2471 */ 2579 */
2472#define MWL8K_FJ_BEACON_MAXLEN 128 2580struct mwl8k_cmd_set_wmm_mode {
2473
2474struct mwl8k_cmd_finalize_join {
2475 struct mwl8k_cmd_pkt header; 2581 struct mwl8k_cmd_pkt header;
2476 __le32 sleep_interval; /* Number of beacon periods to sleep */ 2582 __le16 action;
2477 __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
2478} __attribute__((packed)); 2583} __attribute__((packed));
2479 2584
2480static int mwl8k_finalize_join(struct ieee80211_hw *hw, void *frame, 2585static int mwl8k_cmd_set_wmm_mode(struct ieee80211_hw *hw, bool enable)
2481 int framelen, int dtim)
2482{ 2586{
2483 struct mwl8k_cmd_finalize_join *cmd; 2587 struct mwl8k_priv *priv = hw->priv;
2484 struct ieee80211_mgmt *payload = frame; 2588 struct mwl8k_cmd_set_wmm_mode *cmd;
2485 int payload_len;
2486 int rc; 2589 int rc;
2487 2590
2488 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2591 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2489 if (cmd == NULL) 2592 if (cmd == NULL)
2490 return -ENOMEM; 2593 return -ENOMEM;
2491 2594
2492 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_FINALIZE_JOIN); 2595 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_WMM_MODE);
2493 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2596 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2494 cmd->sleep_interval = cpu_to_le32(dtim ? dtim : 1); 2597 cmd->action = cpu_to_le16(!!enable);
2495
2496 payload_len = framelen - ieee80211_hdrlen(payload->frame_control);
2497 if (payload_len < 0)
2498 payload_len = 0;
2499 else if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
2500 payload_len = MWL8K_FJ_BEACON_MAXLEN;
2501
2502 memcpy(cmd->beacon_data, &payload->u.beacon, payload_len);
2503 2598
2504 rc = mwl8k_post_cmd(hw, &cmd->header); 2599 rc = mwl8k_post_cmd(hw, &cmd->header);
2505 kfree(cmd); 2600 kfree(cmd);
2506 2601
2602 if (!rc)
2603 priv->wmm_enabled = enable;
2604
2507 return rc; 2605 return rc;
2508} 2606}
2509 2607
2510/* 2608/*
2511 * CMD_UPDATE_STADB. 2609 * CMD_MIMO_CONFIG.
2512 */ 2610 */
2513struct mwl8k_cmd_update_sta_db { 2611struct mwl8k_cmd_mimo_config {
2514 struct mwl8k_cmd_pkt header; 2612 struct mwl8k_cmd_pkt header;
2613 __le32 action;
2614 __u8 rx_antenna_map;
2615 __u8 tx_antenna_map;
2616} __attribute__((packed));
2515 2617
2516 /* See STADB_ACTION_TYPE */ 2618static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
2517 __le32 action; 2619{
2620 struct mwl8k_cmd_mimo_config *cmd;
2621 int rc;
2518 2622
2519 /* Peer MAC address */ 2623 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2520 __u8 peer_addr[ETH_ALEN]; 2624 if (cmd == NULL)
2625 return -ENOMEM;
2521 2626
2522 __le32 reserved; 2627 cmd->header.code = cpu_to_le16(MWL8K_CMD_MIMO_CONFIG);
2628 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2629 cmd->action = cpu_to_le32((u32)MWL8K_CMD_SET);
2630 cmd->rx_antenna_map = rx;
2631 cmd->tx_antenna_map = tx;
2523 2632
2524 /* Peer info - valid during add/update. */ 2633 rc = mwl8k_post_cmd(hw, &cmd->header);
2525 struct peer_capability_info peer_info; 2634 kfree(cmd);
2635
2636 return rc;
2637}
2638
2639/*
2640 * CMD_USE_FIXED_RATE (STA version).
2641 */
2642struct mwl8k_cmd_use_fixed_rate_sta {
2643 struct mwl8k_cmd_pkt header;
2644 __le32 action;
2645 __le32 allow_rate_drop;
2646 __le32 num_rates;
2647 struct {
2648 __le32 is_ht_rate;
2649 __le32 enable_retry;
2650 __le32 rate;
2651 __le32 retry_count;
2652 } rate_entry[8];
2653 __le32 rate_type;
2654 __le32 reserved1;
2655 __le32 reserved2;
2526} __attribute__((packed)); 2656} __attribute__((packed));
2527 2657
2528static int mwl8k_cmd_update_sta_db(struct ieee80211_hw *hw, 2658#define MWL8K_USE_AUTO_RATE 0x0002
2529 struct ieee80211_vif *vif, __u32 action) 2659#define MWL8K_UCAST_RATE 0
2660
2661static int mwl8k_cmd_use_fixed_rate_sta(struct ieee80211_hw *hw)
2530{ 2662{
2531 struct mwl8k_vif *mv_vif = MWL8K_VIF(vif); 2663 struct mwl8k_cmd_use_fixed_rate_sta *cmd;
2532 struct ieee80211_bss_conf *info = &mv_vif->bss_info;
2533 struct mwl8k_cmd_update_sta_db *cmd;
2534 struct peer_capability_info *peer_info;
2535 int rc; 2664 int rc;
2536 2665
2537 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2666 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2538 if (cmd == NULL) 2667 if (cmd == NULL)
2539 return -ENOMEM; 2668 return -ENOMEM;
2540 2669
2541 cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB); 2670 cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
2542 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2671 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2672 cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE);
2673 cmd->rate_type = cpu_to_le32(MWL8K_UCAST_RATE);
2543 2674
2544 cmd->action = cpu_to_le32(action); 2675 rc = mwl8k_post_cmd(hw, &cmd->header);
2545 peer_info = &cmd->peer_info; 2676 kfree(cmd);
2546 memcpy(cmd->peer_addr, mv_vif->bssid, ETH_ALEN);
2547 2677
2548 switch (action) { 2678 return rc;
2549 case MWL8K_STA_DB_ADD_ENTRY: 2679}
2550 case MWL8K_STA_DB_MODIFY_ENTRY:
2551 /* Build peer_info block */
2552 peer_info->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
2553 peer_info->basic_caps = cpu_to_le16(info->assoc_capability);
2554 memcpy(peer_info->legacy_rates, mwl8k_rateids,
2555 sizeof(mwl8k_rateids));
2556 peer_info->interop = 1;
2557 peer_info->amsdu_enabled = 0;
2558
2559 rc = mwl8k_post_cmd(hw, &cmd->header);
2560 if (rc == 0)
2561 mv_vif->peer_id = peer_info->station_id;
2562 2680
2563 break; 2681/*
2682 * CMD_USE_FIXED_RATE (AP version).
2683 */
2684struct mwl8k_cmd_use_fixed_rate_ap {
2685 struct mwl8k_cmd_pkt header;
2686 __le32 action;
2687 __le32 allow_rate_drop;
2688 __le32 num_rates;
2689 struct mwl8k_rate_entry_ap {
2690 __le32 is_ht_rate;
2691 __le32 enable_retry;
2692 __le32 rate;
2693 __le32 retry_count;
2694 } rate_entry[4];
2695 u8 multicast_rate;
2696 u8 multicast_rate_type;
2697 u8 management_rate;
2698} __attribute__((packed));
2564 2699
2565 case MWL8K_STA_DB_DEL_ENTRY: 2700static int
2566 case MWL8K_STA_DB_FLUSH: 2701mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt)
2567 default: 2702{
2568 rc = mwl8k_post_cmd(hw, &cmd->header); 2703 struct mwl8k_cmd_use_fixed_rate_ap *cmd;
2569 if (rc == 0) 2704 int rc;
2570 mv_vif->peer_id = 0; 2705
2571 break; 2706 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2572 } 2707 if (cmd == NULL)
2708 return -ENOMEM;
2709
2710 cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
2711 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2712 cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE);
2713 cmd->multicast_rate = mcast;
2714 cmd->management_rate = mgmt;
2715
2716 rc = mwl8k_post_cmd(hw, &cmd->header);
2573 kfree(cmd); 2717 kfree(cmd);
2574 2718
2575 return rc; 2719 return rc;
2576} 2720}
2577 2721
2578/* 2722/*
2579 * CMD_SET_AID. 2723 * CMD_ENABLE_SNIFFER.
2580 */ 2724 */
2581#define MWL8K_FRAME_PROT_DISABLED 0x00 2725struct mwl8k_cmd_enable_sniffer {
2582#define MWL8K_FRAME_PROT_11G 0x07 2726 struct mwl8k_cmd_pkt header;
2583#define MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY 0x02 2727 __le32 action;
2584#define MWL8K_FRAME_PROT_11N_HT_ALL 0x06
2585
2586struct mwl8k_cmd_update_set_aid {
2587 struct mwl8k_cmd_pkt header;
2588 __le16 aid;
2589
2590 /* AP's MAC address (BSSID) */
2591 __u8 bssid[ETH_ALEN];
2592 __le16 protection_mode;
2593 __u8 supp_rates[14];
2594} __attribute__((packed)); 2728} __attribute__((packed));
2595 2729
2596static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw, 2730static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable)
2597 struct ieee80211_vif *vif)
2598{ 2731{
2599 struct mwl8k_vif *mv_vif = MWL8K_VIF(vif); 2732 struct mwl8k_cmd_enable_sniffer *cmd;
2600 struct ieee80211_bss_conf *info = &mv_vif->bss_info;
2601 struct mwl8k_cmd_update_set_aid *cmd;
2602 u16 prot_mode;
2603 int rc; 2733 int rc;
2604 2734
2605 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2735 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2606 if (cmd == NULL) 2736 if (cmd == NULL)
2607 return -ENOMEM; 2737 return -ENOMEM;
2608 2738
2609 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID); 2739 cmd->header.code = cpu_to_le16(MWL8K_CMD_ENABLE_SNIFFER);
2610 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2740 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2611 cmd->aid = cpu_to_le16(info->aid); 2741 cmd->action = cpu_to_le32(!!enable);
2612 2742
2613 memcpy(cmd->bssid, mv_vif->bssid, ETH_ALEN); 2743 rc = mwl8k_post_cmd(hw, &cmd->header);
2744 kfree(cmd);
2614 2745
2615 if (info->use_cts_prot) { 2746 return rc;
2616 prot_mode = MWL8K_FRAME_PROT_11G; 2747}
2748
2749/*
2750 * CMD_SET_MAC_ADDR.
2751 */
2752struct mwl8k_cmd_set_mac_addr {
2753 struct mwl8k_cmd_pkt header;
2754 union {
2755 struct {
2756 __le16 mac_type;
2757 __u8 mac_addr[ETH_ALEN];
2758 } mbss;
2759 __u8 mac_addr[ETH_ALEN];
2760 };
2761} __attribute__((packed));
2762
2763#define MWL8K_MAC_TYPE_PRIMARY_CLIENT 0
2764#define MWL8K_MAC_TYPE_SECONDARY_CLIENT 1
2765#define MWL8K_MAC_TYPE_PRIMARY_AP 2
2766#define MWL8K_MAC_TYPE_SECONDARY_AP 3
2767
2768static int mwl8k_cmd_set_mac_addr(struct ieee80211_hw *hw,
2769 struct ieee80211_vif *vif, u8 *mac)
2770{
2771 struct mwl8k_priv *priv = hw->priv;
2772 struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
2773 struct mwl8k_cmd_set_mac_addr *cmd;
2774 int mac_type;
2775 int rc;
2776
2777 mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
2778 if (vif != NULL && vif->type == NL80211_IFTYPE_STATION) {
2779 if (mwl8k_vif->macid + 1 == ffs(priv->sta_macids_supported))
2780 mac_type = MWL8K_MAC_TYPE_PRIMARY_CLIENT;
2781 else
2782 mac_type = MWL8K_MAC_TYPE_SECONDARY_CLIENT;
2783 } else if (vif != NULL && vif->type == NL80211_IFTYPE_AP) {
2784 if (mwl8k_vif->macid + 1 == ffs(priv->ap_macids_supported))
2785 mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
2786 else
2787 mac_type = MWL8K_MAC_TYPE_SECONDARY_AP;
2788 }
2789
2790 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2791 if (cmd == NULL)
2792 return -ENOMEM;
2793
2794 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR);
2795 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2796 if (priv->ap_fw) {
2797 cmd->mbss.mac_type = cpu_to_le16(mac_type);
2798 memcpy(cmd->mbss.mac_addr, mac, ETH_ALEN);
2617 } else { 2799 } else {
2618 switch (info->ht_operation_mode & 2800 memcpy(cmd->mac_addr, mac, ETH_ALEN);
2619 IEEE80211_HT_OP_MODE_PROTECTION) {
2620 case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
2621 prot_mode = MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY;
2622 break;
2623 case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
2624 prot_mode = MWL8K_FRAME_PROT_11N_HT_ALL;
2625 break;
2626 default:
2627 prot_mode = MWL8K_FRAME_PROT_DISABLED;
2628 break;
2629 }
2630 } 2801 }
2631 cmd->protection_mode = cpu_to_le16(prot_mode);
2632 2802
2633 memcpy(cmd->supp_rates, mwl8k_rateids, sizeof(mwl8k_rateids)); 2803 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
2804 kfree(cmd);
2805
2806 return rc;
2807}
2808
2809/*
2810 * CMD_SET_RATEADAPT_MODE.
2811 */
2812struct mwl8k_cmd_set_rate_adapt_mode {
2813 struct mwl8k_cmd_pkt header;
2814 __le16 action;
2815 __le16 mode;
2816} __attribute__((packed));
2817
2818static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode)
2819{
2820 struct mwl8k_cmd_set_rate_adapt_mode *cmd;
2821 int rc;
2822
2823 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2824 if (cmd == NULL)
2825 return -ENOMEM;
2826
2827 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATEADAPT_MODE);
2828 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2829 cmd->action = cpu_to_le16(MWL8K_CMD_SET);
2830 cmd->mode = cpu_to_le16(mode);
2634 2831
2635 rc = mwl8k_post_cmd(hw, &cmd->header); 2832 rc = mwl8k_post_cmd(hw, &cmd->header);
2636 kfree(cmd); 2833 kfree(cmd);
@@ -2639,115 +2836,255 @@ static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
2639} 2836}
2640 2837
2641/* 2838/*
2642 * CMD_SET_RATE. 2839 * CMD_BSS_START.
2643 */ 2840 */
2644struct mwl8k_cmd_update_rateset { 2841struct mwl8k_cmd_bss_start {
2645 struct mwl8k_cmd_pkt header; 2842 struct mwl8k_cmd_pkt header;
2646 __u8 legacy_rates[14]; 2843 __le32 enable;
2647
2648 /* Bitmap for supported MCS codes. */
2649 __u8 mcs_set[16];
2650 __u8 reserved[16];
2651} __attribute__((packed)); 2844} __attribute__((packed));
2652 2845
2653static int mwl8k_update_rateset(struct ieee80211_hw *hw, 2846static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
2654 struct ieee80211_vif *vif) 2847 struct ieee80211_vif *vif, int enable)
2655{ 2848{
2656 struct mwl8k_cmd_update_rateset *cmd; 2849 struct mwl8k_cmd_bss_start *cmd;
2657 int rc; 2850 int rc;
2658 2851
2659 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2852 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2660 if (cmd == NULL) 2853 if (cmd == NULL)
2661 return -ENOMEM; 2854 return -ENOMEM;
2662 2855
2663 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE); 2856 cmd->header.code = cpu_to_le16(MWL8K_CMD_BSS_START);
2664 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2857 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2665 memcpy(cmd->legacy_rates, mwl8k_rateids, sizeof(mwl8k_rateids)); 2858 cmd->enable = cpu_to_le32(enable);
2666 2859
2667 rc = mwl8k_post_cmd(hw, &cmd->header); 2860 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
2668 kfree(cmd); 2861 kfree(cmd);
2669 2862
2670 return rc; 2863 return rc;
2671} 2864}
2672 2865
2673/* 2866/*
2674 * CMD_USE_FIXED_RATE. 2867 * CMD_SET_NEW_STN.
2675 */ 2868 */
2676#define MWL8K_RATE_TABLE_SIZE 8 2869struct mwl8k_cmd_set_new_stn {
2677#define MWL8K_UCAST_RATE 0 2870 struct mwl8k_cmd_pkt header;
2678#define MWL8K_USE_AUTO_RATE 0x0002 2871 __le16 aid;
2872 __u8 mac_addr[6];
2873 __le16 stn_id;
2874 __le16 action;
2875 __le16 rsvd;
2876 __le32 legacy_rates;
2877 __u8 ht_rates[4];
2878 __le16 cap_info;
2879 __le16 ht_capabilities_info;
2880 __u8 mac_ht_param_info;
2881 __u8 rev;
2882 __u8 control_channel;
2883 __u8 add_channel;
2884 __le16 op_mode;
2885 __le16 stbc;
2886 __u8 add_qos_info;
2887 __u8 is_qos_sta;
2888 __le32 fw_sta_ptr;
2889} __attribute__((packed));
2890
2891#define MWL8K_STA_ACTION_ADD 0
2892#define MWL8K_STA_ACTION_REMOVE 2
2893
2894static int mwl8k_cmd_set_new_stn_add(struct ieee80211_hw *hw,
2895 struct ieee80211_vif *vif,
2896 struct ieee80211_sta *sta)
2897{
2898 struct mwl8k_cmd_set_new_stn *cmd;
2899 u32 rates;
2900 int rc;
2901
2902 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2903 if (cmd == NULL)
2904 return -ENOMEM;
2905
2906 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
2907 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2908 cmd->aid = cpu_to_le16(sta->aid);
2909 memcpy(cmd->mac_addr, sta->addr, ETH_ALEN);
2910 cmd->stn_id = cpu_to_le16(sta->aid);
2911 cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD);
2912 if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
2913 rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
2914 else
2915 rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
2916 cmd->legacy_rates = cpu_to_le32(rates);
2917 if (sta->ht_cap.ht_supported) {
2918 cmd->ht_rates[0] = sta->ht_cap.mcs.rx_mask[0];
2919 cmd->ht_rates[1] = sta->ht_cap.mcs.rx_mask[1];
2920 cmd->ht_rates[2] = sta->ht_cap.mcs.rx_mask[2];
2921 cmd->ht_rates[3] = sta->ht_cap.mcs.rx_mask[3];
2922 cmd->ht_capabilities_info = cpu_to_le16(sta->ht_cap.cap);
2923 cmd->mac_ht_param_info = (sta->ht_cap.ampdu_factor & 3) |
2924 ((sta->ht_cap.ampdu_density & 7) << 2);
2925 cmd->is_qos_sta = 1;
2926 }
2927
2928 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
2929 kfree(cmd);
2930
2931 return rc;
2932}
2933
2934static int mwl8k_cmd_set_new_stn_add_self(struct ieee80211_hw *hw,
2935 struct ieee80211_vif *vif)
2936{
2937 struct mwl8k_cmd_set_new_stn *cmd;
2938 int rc;
2679 2939
2680struct mwl8k_rate_entry { 2940 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2681 /* Set to 1 if HT rate, 0 if legacy. */ 2941 if (cmd == NULL)
2682 __le32 is_ht_rate; 2942 return -ENOMEM;
2683 2943
2684 /* Set to 1 to use retry_count field. */ 2944 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
2685 __le32 enable_retry; 2945 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2946 memcpy(cmd->mac_addr, vif->addr, ETH_ALEN);
2686 2947
2687 /* Specified legacy rate or MCS. */ 2948 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
2688 __le32 rate; 2949 kfree(cmd);
2689 2950
2690 /* Number of allowed retries. */ 2951 return rc;
2691 __le32 retry_count; 2952}
2953
2954static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw,
2955 struct ieee80211_vif *vif, u8 *addr)
2956{
2957 struct mwl8k_cmd_set_new_stn *cmd;
2958 int rc;
2959
2960 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2961 if (cmd == NULL)
2962 return -ENOMEM;
2963
2964 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
2965 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2966 memcpy(cmd->mac_addr, addr, ETH_ALEN);
2967 cmd->action = cpu_to_le16(MWL8K_STA_ACTION_REMOVE);
2968
2969 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
2970 kfree(cmd);
2971
2972 return rc;
2973}
2974
2975/*
2976 * CMD_UPDATE_STADB.
2977 */
2978struct ewc_ht_info {
2979 __le16 control1;
2980 __le16 control2;
2981 __le16 control3;
2692} __attribute__((packed)); 2982} __attribute__((packed));
2693 2983
2694struct mwl8k_rate_table { 2984struct peer_capability_info {
2695 /* 1 to allow specified rate and below */ 2985 /* Peer type - AP vs. STA. */
2696 __le32 allow_rate_drop; 2986 __u8 peer_type;
2697 __le32 num_rates; 2987
2698 struct mwl8k_rate_entry rate_entry[MWL8K_RATE_TABLE_SIZE]; 2988 /* Basic 802.11 capabilities from assoc resp. */
2989 __le16 basic_caps;
2990
2991 /* Set if peer supports 802.11n high throughput (HT). */
2992 __u8 ht_support;
2993
2994 /* Valid if HT is supported. */
2995 __le16 ht_caps;
2996 __u8 extended_ht_caps;
2997 struct ewc_ht_info ewc_info;
2998
2999 /* Legacy rate table. Intersection of our rates and peer rates. */
3000 __u8 legacy_rates[12];
3001
3002 /* HT rate table. Intersection of our rates and peer rates. */
3003 __u8 ht_rates[16];
3004 __u8 pad[16];
3005
3006 /* If set, interoperability mode, no proprietary extensions. */
3007 __u8 interop;
3008 __u8 pad2;
3009 __u8 station_id;
3010 __le16 amsdu_enabled;
2699} __attribute__((packed)); 3011} __attribute__((packed));
2700 3012
2701struct mwl8k_cmd_use_fixed_rate { 3013struct mwl8k_cmd_update_stadb {
2702 struct mwl8k_cmd_pkt header; 3014 struct mwl8k_cmd_pkt header;
3015
3016 /* See STADB_ACTION_TYPE */
2703 __le32 action; 3017 __le32 action;
2704 struct mwl8k_rate_table rate_table;
2705 3018
2706 /* Unicast, Broadcast or Multicast */ 3019 /* Peer MAC address */
2707 __le32 rate_type; 3020 __u8 peer_addr[ETH_ALEN];
2708 __le32 reserved1; 3021
2709 __le32 reserved2; 3022 __le32 reserved;
3023
3024 /* Peer info - valid during add/update. */
3025 struct peer_capability_info peer_info;
2710} __attribute__((packed)); 3026} __attribute__((packed));
2711 3027
2712static int mwl8k_cmd_use_fixed_rate(struct ieee80211_hw *hw, 3028#define MWL8K_STA_DB_MODIFY_ENTRY 1
2713 u32 action, u32 rate_type, struct mwl8k_rate_table *rate_table) 3029#define MWL8K_STA_DB_DEL_ENTRY 2
3030
3031/* Peer Entry flags - used to define the type of the peer node */
3032#define MWL8K_PEER_TYPE_ACCESSPOINT 2
3033
3034static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
3035 struct ieee80211_vif *vif,
3036 struct ieee80211_sta *sta)
2714{ 3037{
2715 struct mwl8k_cmd_use_fixed_rate *cmd; 3038 struct mwl8k_cmd_update_stadb *cmd;
2716 int count; 3039 struct peer_capability_info *p;
3040 u32 rates;
2717 int rc; 3041 int rc;
2718 3042
2719 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 3043 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2720 if (cmd == NULL) 3044 if (cmd == NULL)
2721 return -ENOMEM; 3045 return -ENOMEM;
2722 3046
2723 cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE); 3047 cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
2724 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 3048 cmd->header.length = cpu_to_le16(sizeof(*cmd));
3049 cmd->action = cpu_to_le32(MWL8K_STA_DB_MODIFY_ENTRY);
3050 memcpy(cmd->peer_addr, sta->addr, ETH_ALEN);
3051
3052 p = &cmd->peer_info;
3053 p->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
3054 p->basic_caps = cpu_to_le16(vif->bss_conf.assoc_capability);
3055 p->ht_support = sta->ht_cap.ht_supported;
3056 p->ht_caps = sta->ht_cap.cap;
3057 p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) |
3058 ((sta->ht_cap.ampdu_density & 7) << 2);
3059 if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
3060 rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
3061 else
3062 rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
3063 legacy_rate_mask_to_array(p->legacy_rates, rates);
3064 memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16);
3065 p->interop = 1;
3066 p->amsdu_enabled = 0;
2725 3067
2726 cmd->action = cpu_to_le32(action); 3068 rc = mwl8k_post_cmd(hw, &cmd->header);
2727 cmd->rate_type = cpu_to_le32(rate_type); 3069 kfree(cmd);
2728 3070
2729 if (rate_table != NULL) { 3071 return rc ? rc : p->station_id;
2730 /* 3072}
2731 * Copy over each field manually so that endian 3073
2732 * conversion can be done. 3074static int mwl8k_cmd_update_stadb_del(struct ieee80211_hw *hw,
2733 */ 3075 struct ieee80211_vif *vif, u8 *addr)
2734 cmd->rate_table.allow_rate_drop = 3076{
2735 cpu_to_le32(rate_table->allow_rate_drop); 3077 struct mwl8k_cmd_update_stadb *cmd;
2736 cmd->rate_table.num_rates = 3078 int rc;
2737 cpu_to_le32(rate_table->num_rates); 3079
2738 3080 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2739 for (count = 0; count < rate_table->num_rates; count++) { 3081 if (cmd == NULL)
2740 struct mwl8k_rate_entry *dst = 3082 return -ENOMEM;
2741 &cmd->rate_table.rate_entry[count]; 3083
2742 struct mwl8k_rate_entry *src = 3084 cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
2743 &rate_table->rate_entry[count]; 3085 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2744 3086 cmd->action = cpu_to_le32(MWL8K_STA_DB_DEL_ENTRY);
2745 dst->is_ht_rate = cpu_to_le32(src->is_ht_rate); 3087 memcpy(cmd->peer_addr, addr, ETH_ALEN);
2746 dst->enable_retry = cpu_to_le32(src->enable_retry);
2747 dst->rate = cpu_to_le32(src->rate);
2748 dst->retry_count = cpu_to_le32(src->retry_count);
2749 }
2750 }
2751 3088
2752 rc = mwl8k_post_cmd(hw, &cmd->header); 3089 rc = mwl8k_post_cmd(hw, &cmd->header);
2753 kfree(cmd); 3090 kfree(cmd);
@@ -2766,19 +3103,22 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
2766 u32 status; 3103 u32 status;
2767 3104
2768 status = ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS); 3105 status = ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
2769 iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
2770
2771 if (!status) 3106 if (!status)
2772 return IRQ_NONE; 3107 return IRQ_NONE;
2773 3108
2774 if (status & MWL8K_A2H_INT_TX_DONE) 3109 if (status & MWL8K_A2H_INT_TX_DONE) {
2775 tasklet_schedule(&priv->tx_reclaim_task); 3110 status &= ~MWL8K_A2H_INT_TX_DONE;
3111 tasklet_schedule(&priv->poll_tx_task);
3112 }
2776 3113
2777 if (status & MWL8K_A2H_INT_RX_READY) { 3114 if (status & MWL8K_A2H_INT_RX_READY) {
2778 while (rxq_process(hw, 0, 1)) 3115 status &= ~MWL8K_A2H_INT_RX_READY;
2779 rxq_refill(hw, 0, 1); 3116 tasklet_schedule(&priv->poll_rx_task);
2780 } 3117 }
2781 3118
3119 if (status)
3120 iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
3121
2782 if (status & MWL8K_A2H_INT_OPC_DONE) { 3122 if (status & MWL8K_A2H_INT_OPC_DONE) {
2783 if (priv->hostcmd_wait != NULL) 3123 if (priv->hostcmd_wait != NULL)
2784 complete(priv->hostcmd_wait); 3124 complete(priv->hostcmd_wait);
@@ -2793,6 +3133,53 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
2793 return IRQ_HANDLED; 3133 return IRQ_HANDLED;
2794} 3134}
2795 3135
3136static void mwl8k_tx_poll(unsigned long data)
3137{
3138 struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
3139 struct mwl8k_priv *priv = hw->priv;
3140 int limit;
3141 int i;
3142
3143 limit = 32;
3144
3145 spin_lock_bh(&priv->tx_lock);
3146
3147 for (i = 0; i < MWL8K_TX_QUEUES; i++)
3148 limit -= mwl8k_txq_reclaim(hw, i, limit, 0);
3149
3150 if (!priv->pending_tx_pkts && priv->tx_wait != NULL) {
3151 complete(priv->tx_wait);
3152 priv->tx_wait = NULL;
3153 }
3154
3155 spin_unlock_bh(&priv->tx_lock);
3156
3157 if (limit) {
3158 writel(~MWL8K_A2H_INT_TX_DONE,
3159 priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
3160 } else {
3161 tasklet_schedule(&priv->poll_tx_task);
3162 }
3163}
3164
3165static void mwl8k_rx_poll(unsigned long data)
3166{
3167 struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
3168 struct mwl8k_priv *priv = hw->priv;
3169 int limit;
3170
3171 limit = 32;
3172 limit -= rxq_process(hw, 0, limit);
3173 limit -= rxq_refill(hw, 0, limit);
3174
3175 if (limit) {
3176 writel(~MWL8K_A2H_INT_RX_READY,
3177 priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
3178 } else {
3179 tasklet_schedule(&priv->poll_rx_task);
3180 }
3181}
3182
2796 3183
2797/* 3184/*
2798 * Core driver operations. 3185 * Core driver operations.
@@ -2803,7 +3190,7 @@ static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2803 int index = skb_get_queue_mapping(skb); 3190 int index = skb_get_queue_mapping(skb);
2804 int rc; 3191 int rc;
2805 3192
2806 if (priv->current_channel == NULL) { 3193 if (!priv->radio_on) {
2807 printk(KERN_DEBUG "%s: dropped TX frame since radio " 3194 printk(KERN_DEBUG "%s: dropped TX frame since radio "
2808 "disabled\n", wiphy_name(hw->wiphy)); 3195 "disabled\n", wiphy_name(hw->wiphy));
2809 dev_kfree_skb(skb); 3196 dev_kfree_skb(skb);
@@ -2828,19 +3215,20 @@ static int mwl8k_start(struct ieee80211_hw *hw)
2828 return -EIO; 3215 return -EIO;
2829 } 3216 }
2830 3217
2831 /* Enable tx reclaim tasklet */ 3218 /* Enable TX reclaim and RX tasklets. */
2832 tasklet_enable(&priv->tx_reclaim_task); 3219 tasklet_enable(&priv->poll_tx_task);
3220 tasklet_enable(&priv->poll_rx_task);
2833 3221
2834 /* Enable interrupts */ 3222 /* Enable interrupts */
2835 iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 3223 iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
2836 3224
2837 rc = mwl8k_fw_lock(hw); 3225 rc = mwl8k_fw_lock(hw);
2838 if (!rc) { 3226 if (!rc) {
2839 rc = mwl8k_cmd_802_11_radio_enable(hw); 3227 rc = mwl8k_cmd_radio_enable(hw);
2840 3228
2841 if (!priv->ap_fw) { 3229 if (!priv->ap_fw) {
2842 if (!rc) 3230 if (!rc)
2843 rc = mwl8k_enable_sniffer(hw, 0); 3231 rc = mwl8k_cmd_enable_sniffer(hw, 0);
2844 3232
2845 if (!rc) 3233 if (!rc)
2846 rc = mwl8k_cmd_set_pre_scan(hw); 3234 rc = mwl8k_cmd_set_pre_scan(hw);
@@ -2851,10 +3239,10 @@ static int mwl8k_start(struct ieee80211_hw *hw)
2851 } 3239 }
2852 3240
2853 if (!rc) 3241 if (!rc)
2854 rc = mwl8k_cmd_setrateadaptmode(hw, 0); 3242 rc = mwl8k_cmd_set_rateadapt_mode(hw, 0);
2855 3243
2856 if (!rc) 3244 if (!rc)
2857 rc = mwl8k_set_wmm(hw, 0); 3245 rc = mwl8k_cmd_set_wmm_mode(hw, 0);
2858 3246
2859 mwl8k_fw_unlock(hw); 3247 mwl8k_fw_unlock(hw);
2860 } 3248 }
@@ -2862,7 +3250,8 @@ static int mwl8k_start(struct ieee80211_hw *hw)
2862 if (rc) { 3250 if (rc) {
2863 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 3251 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
2864 free_irq(priv->pdev->irq, hw); 3252 free_irq(priv->pdev->irq, hw);
2865 tasklet_disable(&priv->tx_reclaim_task); 3253 tasklet_disable(&priv->poll_tx_task);
3254 tasklet_disable(&priv->poll_rx_task);
2866 } 3255 }
2867 3256
2868 return rc; 3257 return rc;
@@ -2873,7 +3262,7 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
2873 struct mwl8k_priv *priv = hw->priv; 3262 struct mwl8k_priv *priv = hw->priv;
2874 int i; 3263 int i;
2875 3264
2876 mwl8k_cmd_802_11_radio_disable(hw); 3265 mwl8k_cmd_radio_disable(hw);
2877 3266
2878 ieee80211_stop_queues(hw); 3267 ieee80211_stop_queues(hw);
2879 3268
@@ -2886,36 +3275,27 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
2886 if (priv->beacon_skb != NULL) 3275 if (priv->beacon_skb != NULL)
2887 dev_kfree_skb(priv->beacon_skb); 3276 dev_kfree_skb(priv->beacon_skb);
2888 3277
2889 /* Stop tx reclaim tasklet */ 3278 /* Stop TX reclaim and RX tasklets. */
2890 tasklet_disable(&priv->tx_reclaim_task); 3279 tasklet_disable(&priv->poll_tx_task);
3280 tasklet_disable(&priv->poll_rx_task);
2891 3281
2892 /* Return all skbs to mac80211 */ 3282 /* Return all skbs to mac80211 */
2893 for (i = 0; i < MWL8K_TX_QUEUES; i++) 3283 for (i = 0; i < MWL8K_TX_QUEUES; i++)
2894 mwl8k_txq_reclaim(hw, i, 1); 3284 mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
2895} 3285}
2896 3286
2897static int mwl8k_add_interface(struct ieee80211_hw *hw, 3287static int mwl8k_add_interface(struct ieee80211_hw *hw,
2898 struct ieee80211_if_init_conf *conf) 3288 struct ieee80211_vif *vif)
2899{ 3289{
2900 struct mwl8k_priv *priv = hw->priv; 3290 struct mwl8k_priv *priv = hw->priv;
2901 struct mwl8k_vif *mwl8k_vif; 3291 struct mwl8k_vif *mwl8k_vif;
2902 3292 u32 macids_supported;
2903 /* 3293 int macid;
2904 * We only support one active interface at a time.
2905 */
2906 if (priv->vif != NULL)
2907 return -EBUSY;
2908
2909 /*
2910 * We only support managed interfaces for now.
2911 */
2912 if (conf->type != NL80211_IFTYPE_STATION)
2913 return -EINVAL;
2914 3294
2915 /* 3295 /*
2916 * Reject interface creation if sniffer mode is active, as 3296 * Reject interface creation if sniffer mode is active, as
2917 * STA operation is mutually exclusive with hardware sniffer 3297 * STA operation is mutually exclusive with hardware sniffer
2918 * mode. 3298 * mode. (Sniffer mode is only used on STA firmware.)
2919 */ 3299 */
2920 if (priv->sniffer_enabled) { 3300 if (priv->sniffer_enabled) {
2921 printk(KERN_INFO "%s: unable to create STA " 3301 printk(KERN_INFO "%s: unable to create STA "
@@ -2924,37 +3304,54 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
2924 return -EINVAL; 3304 return -EINVAL;
2925 } 3305 }
2926 3306
2927 /* Clean out driver private area */
2928 mwl8k_vif = MWL8K_VIF(conf->vif);
2929 memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));
2930 3307
2931 /* Set and save the mac address */ 3308 switch (vif->type) {
2932 mwl8k_set_mac_addr(hw, conf->mac_addr); 3309 case NL80211_IFTYPE_AP:
2933 memcpy(mwl8k_vif->mac_addr, conf->mac_addr, ETH_ALEN); 3310 macids_supported = priv->ap_macids_supported;
3311 break;
3312 case NL80211_IFTYPE_STATION:
3313 macids_supported = priv->sta_macids_supported;
3314 break;
3315 default:
3316 return -EINVAL;
3317 }
2934 3318
2935 /* Back pointer to parent config block */ 3319 macid = ffs(macids_supported & ~priv->macids_used);
2936 mwl8k_vif->priv = priv; 3320 if (!macid--)
3321 return -EBUSY;
2937 3322
2938 /* Set Initial sequence number to zero */ 3323 /* Setup driver private area. */
3324 mwl8k_vif = MWL8K_VIF(vif);
3325 memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));
3326 mwl8k_vif->vif = vif;
3327 mwl8k_vif->macid = macid;
2939 mwl8k_vif->seqno = 0; 3328 mwl8k_vif->seqno = 0;
2940 3329
2941 priv->vif = conf->vif; 3330 /* Set the mac address. */
2942 priv->current_channel = NULL; 3331 mwl8k_cmd_set_mac_addr(hw, vif, vif->addr);
3332
3333 if (priv->ap_fw)
3334 mwl8k_cmd_set_new_stn_add_self(hw, vif);
3335
3336 priv->macids_used |= 1 << mwl8k_vif->macid;
3337 list_add_tail(&mwl8k_vif->list, &priv->vif_list);
2943 3338
2944 return 0; 3339 return 0;
2945} 3340}
2946 3341
2947static void mwl8k_remove_interface(struct ieee80211_hw *hw, 3342static void mwl8k_remove_interface(struct ieee80211_hw *hw,
2948 struct ieee80211_if_init_conf *conf) 3343 struct ieee80211_vif *vif)
2949{ 3344{
2950 struct mwl8k_priv *priv = hw->priv; 3345 struct mwl8k_priv *priv = hw->priv;
3346 struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
2951 3347
2952 if (priv->vif == NULL) 3348 if (priv->ap_fw)
2953 return; 3349 mwl8k_cmd_set_new_stn_del(hw, vif, vif->addr);
2954 3350
2955 mwl8k_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00"); 3351 mwl8k_cmd_set_mac_addr(hw, vif, "\x00\x00\x00\x00\x00\x00");
2956 3352
2957 priv->vif = NULL; 3353 priv->macids_used &= ~(1 << mwl8k_vif->macid);
3354 list_del(&mwl8k_vif->list);
2958} 3355}
2959 3356
2960static int mwl8k_config(struct ieee80211_hw *hw, u32 changed) 3357static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
@@ -2964,8 +3361,7 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
2964 int rc; 3361 int rc;
2965 3362
2966 if (conf->flags & IEEE80211_CONF_IDLE) { 3363 if (conf->flags & IEEE80211_CONF_IDLE) {
2967 mwl8k_cmd_802_11_radio_disable(hw); 3364 mwl8k_cmd_radio_disable(hw);
2968 priv->current_channel = NULL;
2969 return 0; 3365 return 0;
2970 } 3366 }
2971 3367
@@ -2973,19 +3369,17 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
2973 if (rc) 3369 if (rc)
2974 return rc; 3370 return rc;
2975 3371
2976 rc = mwl8k_cmd_802_11_radio_enable(hw); 3372 rc = mwl8k_cmd_radio_enable(hw);
2977 if (rc) 3373 if (rc)
2978 goto out; 3374 goto out;
2979 3375
2980 rc = mwl8k_cmd_set_rf_channel(hw, conf->channel); 3376 rc = mwl8k_cmd_set_rf_channel(hw, conf);
2981 if (rc) 3377 if (rc)
2982 goto out; 3378 goto out;
2983 3379
2984 priv->current_channel = conf->channel;
2985
2986 if (conf->power_level > 18) 3380 if (conf->power_level > 18)
2987 conf->power_level = 18; 3381 conf->power_level = 18;
2988 rc = mwl8k_cmd_802_11_rf_tx_power(hw, conf->power_level); 3382 rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
2989 if (rc) 3383 if (rc)
2990 goto out; 3384 goto out;
2991 3385
@@ -3003,79 +3397,160 @@ out:
3003 return rc; 3397 return rc;
3004} 3398}
3005 3399
3006static void mwl8k_bss_info_changed(struct ieee80211_hw *hw, 3400static void
3007 struct ieee80211_vif *vif, 3401mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3008 struct ieee80211_bss_conf *info, 3402 struct ieee80211_bss_conf *info, u32 changed)
3009 u32 changed)
3010{ 3403{
3011 struct mwl8k_priv *priv = hw->priv; 3404 struct mwl8k_priv *priv = hw->priv;
3012 struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif); 3405 u32 ap_legacy_rates;
3406 u8 ap_mcs_rates[16];
3013 int rc; 3407 int rc;
3014 3408
3015 if ((changed & BSS_CHANGED_ASSOC) == 0) 3409 if (mwl8k_fw_lock(hw))
3016 return; 3410 return;
3017 3411
3018 priv->capture_beacon = false; 3412 /*
3019 3413 * No need to capture a beacon if we're no longer associated.
3020 rc = mwl8k_fw_lock(hw); 3414 */
3021 if (rc) 3415 if ((changed & BSS_CHANGED_ASSOC) && !vif->bss_conf.assoc)
3022 return; 3416 priv->capture_beacon = false;
3023 3417
3024 if (info->assoc) { 3418 /*
3025 memcpy(&mwl8k_vif->bss_info, info, 3419 * Get the AP's legacy and MCS rates.
3026 sizeof(struct ieee80211_bss_conf)); 3420 */
3421 if (vif->bss_conf.assoc) {
3422 struct ieee80211_sta *ap;
3027 3423
3028 memcpy(mwl8k_vif->bssid, info->bssid, ETH_ALEN); 3424 rcu_read_lock();
3029 3425
3030 /* Install rates */ 3426 ap = ieee80211_find_sta(vif, vif->bss_conf.bssid);
3031 rc = mwl8k_update_rateset(hw, vif); 3427 if (ap == NULL) {
3032 if (rc) 3428 rcu_read_unlock();
3033 goto out; 3429 goto out;
3430 }
3431
3432 if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) {
3433 ap_legacy_rates = ap->supp_rates[IEEE80211_BAND_2GHZ];
3434 } else {
3435 ap_legacy_rates =
3436 ap->supp_rates[IEEE80211_BAND_5GHZ] << 5;
3437 }
3438 memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
3034 3439
3035 /* Turn on rate adaptation */ 3440 rcu_read_unlock();
3036 rc = mwl8k_cmd_use_fixed_rate(hw, MWL8K_USE_AUTO_RATE, 3441 }
3037 MWL8K_UCAST_RATE, NULL); 3442
3443 if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc) {
3444 rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates);
3038 if (rc) 3445 if (rc)
3039 goto out; 3446 goto out;
3040 3447
3041 /* Set radio preamble */ 3448 rc = mwl8k_cmd_use_fixed_rate_sta(hw);
3042 rc = mwl8k_set_radio_preamble(hw, info->use_short_preamble);
3043 if (rc) 3449 if (rc)
3044 goto out; 3450 goto out;
3451 }
3045 3452
3046 /* Set slot time */ 3453 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3047 rc = mwl8k_cmd_set_slot(hw, info->use_short_slot); 3454 rc = mwl8k_set_radio_preamble(hw,
3455 vif->bss_conf.use_short_preamble);
3048 if (rc) 3456 if (rc)
3049 goto out; 3457 goto out;
3458 }
3050 3459
3051 /* Update peer rate info */ 3460 if (changed & BSS_CHANGED_ERP_SLOT) {
3052 rc = mwl8k_cmd_update_sta_db(hw, vif, 3461 rc = mwl8k_cmd_set_slot(hw, vif->bss_conf.use_short_slot);
3053 MWL8K_STA_DB_MODIFY_ENTRY);
3054 if (rc) 3462 if (rc)
3055 goto out; 3463 goto out;
3464 }
3056 3465
3057 /* Set AID */ 3466 if (vif->bss_conf.assoc &&
3058 rc = mwl8k_cmd_set_aid(hw, vif); 3467 (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_CTS_PROT |
3468 BSS_CHANGED_HT))) {
3469 rc = mwl8k_cmd_set_aid(hw, vif, ap_legacy_rates);
3059 if (rc) 3470 if (rc)
3060 goto out; 3471 goto out;
3472 }
3061 3473
3474 if (vif->bss_conf.assoc &&
3475 (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INT))) {
3062 /* 3476 /*
3063 * Finalize the join. Tell rx handler to process 3477 * Finalize the join. Tell rx handler to process
3064 * next beacon from our BSSID. 3478 * next beacon from our BSSID.
3065 */ 3479 */
3066 memcpy(priv->capture_bssid, mwl8k_vif->bssid, ETH_ALEN); 3480 memcpy(priv->capture_bssid, vif->bss_conf.bssid, ETH_ALEN);
3067 priv->capture_beacon = true; 3481 priv->capture_beacon = true;
3068 } else {
3069 rc = mwl8k_cmd_update_sta_db(hw, vif, MWL8K_STA_DB_DEL_ENTRY);
3070 memset(&mwl8k_vif->bss_info, 0,
3071 sizeof(struct ieee80211_bss_conf));
3072 memset(mwl8k_vif->bssid, 0, ETH_ALEN);
3073 } 3482 }
3074 3483
3075out: 3484out:
3076 mwl8k_fw_unlock(hw); 3485 mwl8k_fw_unlock(hw);
3077} 3486}
3078 3487
3488static void
3489mwl8k_bss_info_changed_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3490 struct ieee80211_bss_conf *info, u32 changed)
3491{
3492 int rc;
3493
3494 if (mwl8k_fw_lock(hw))
3495 return;
3496
3497 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3498 rc = mwl8k_set_radio_preamble(hw,
3499 vif->bss_conf.use_short_preamble);
3500 if (rc)
3501 goto out;
3502 }
3503
3504 if (changed & BSS_CHANGED_BASIC_RATES) {
3505 int idx;
3506 int rate;
3507
3508 /*
3509 * Use lowest supported basic rate for multicasts
3510 * and management frames (such as probe responses --
3511 * beacons will always go out at 1 Mb/s).
3512 */
3513 idx = ffs(vif->bss_conf.basic_rates);
3514 if (idx)
3515 idx--;
3516
3517 if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
3518 rate = mwl8k_rates_24[idx].hw_value;
3519 else
3520 rate = mwl8k_rates_50[idx].hw_value;
3521
3522 mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
3523 }
3524
3525 if (changed & (BSS_CHANGED_BEACON_INT | BSS_CHANGED_BEACON)) {
3526 struct sk_buff *skb;
3527
3528 skb = ieee80211_beacon_get(hw, vif);
3529 if (skb != NULL) {
3530 mwl8k_cmd_set_beacon(hw, vif, skb->data, skb->len);
3531 kfree_skb(skb);
3532 }
3533 }
3534
3535 if (changed & BSS_CHANGED_BEACON_ENABLED)
3536 mwl8k_cmd_bss_start(hw, vif, info->enable_beacon);
3537
3538out:
3539 mwl8k_fw_unlock(hw);
3540}
3541
3542static void
3543mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3544 struct ieee80211_bss_conf *info, u32 changed)
3545{
3546 struct mwl8k_priv *priv = hw->priv;
3547
3548 if (!priv->ap_fw)
3549 mwl8k_bss_info_changed_sta(hw, vif, info, changed);
3550 else
3551 mwl8k_bss_info_changed_ap(hw, vif, info, changed);
3552}
3553
3079static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw, 3554static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
3080 int mc_count, struct dev_addr_list *mclist) 3555 int mc_count, struct dev_addr_list *mclist)
3081{ 3556{
@@ -3105,7 +3580,7 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
3105 * operation, so refuse to enable sniffer mode if a STA 3580 * operation, so refuse to enable sniffer mode if a STA
3106 * interface is active. 3581 * interface is active.
3107 */ 3582 */
3108 if (priv->vif != NULL) { 3583 if (!list_empty(&priv->vif_list)) {
3109 if (net_ratelimit()) 3584 if (net_ratelimit())
3110 printk(KERN_INFO "%s: not enabling sniffer " 3585 printk(KERN_INFO "%s: not enabling sniffer "
3111 "mode because STA interface is active\n", 3586 "mode because STA interface is active\n",
@@ -3114,7 +3589,7 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
3114 } 3589 }
3115 3590
3116 if (!priv->sniffer_enabled) { 3591 if (!priv->sniffer_enabled) {
3117 if (mwl8k_enable_sniffer(hw, 1)) 3592 if (mwl8k_cmd_enable_sniffer(hw, 1))
3118 return 0; 3593 return 0;
3119 priv->sniffer_enabled = true; 3594 priv->sniffer_enabled = true;
3120 } 3595 }
@@ -3126,6 +3601,14 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
3126 return 1; 3601 return 1;
3127} 3602}
3128 3603
3604static struct mwl8k_vif *mwl8k_first_vif(struct mwl8k_priv *priv)
3605{
3606 if (!list_empty(&priv->vif_list))
3607 return list_entry(priv->vif_list.next, struct mwl8k_vif, list);
3608
3609 return NULL;
3610}
3611
3129static void mwl8k_configure_filter(struct ieee80211_hw *hw, 3612static void mwl8k_configure_filter(struct ieee80211_hw *hw,
3130 unsigned int changed_flags, 3613 unsigned int changed_flags,
3131 unsigned int *total_flags, 3614 unsigned int *total_flags,
@@ -3163,7 +3646,7 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
3163 } 3646 }
3164 3647
3165 if (priv->sniffer_enabled) { 3648 if (priv->sniffer_enabled) {
3166 mwl8k_enable_sniffer(hw, 0); 3649 mwl8k_cmd_enable_sniffer(hw, 0);
3167 priv->sniffer_enabled = false; 3650 priv->sniffer_enabled = false;
3168 } 3651 }
3169 3652
@@ -3174,7 +3657,8 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
3174 */ 3657 */
3175 mwl8k_cmd_set_pre_scan(hw); 3658 mwl8k_cmd_set_pre_scan(hw);
3176 } else { 3659 } else {
3177 u8 *bssid; 3660 struct mwl8k_vif *mwl8k_vif;
3661 const u8 *bssid;
3178 3662
3179 /* 3663 /*
3180 * Enable the BSS filter. 3664 * Enable the BSS filter.
@@ -3184,9 +3668,11 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
3184 * (where the OUI part needs to be nonzero for 3668 * (where the OUI part needs to be nonzero for
3185 * the BSSID to be accepted by POST_SCAN). 3669 * the BSSID to be accepted by POST_SCAN).
3186 */ 3670 */
3187 bssid = "\x01\x00\x00\x00\x00\x00"; 3671 mwl8k_vif = mwl8k_first_vif(priv);
3188 if (priv->vif != NULL) 3672 if (mwl8k_vif != NULL)
3189 bssid = MWL8K_VIF(priv->vif)->bssid; 3673 bssid = mwl8k_vif->vif->bss_conf.bssid;
3674 else
3675 bssid = "\x01\x00\x00\x00\x00\x00";
3190 3676
3191 mwl8k_cmd_set_post_scan(hw, bssid); 3677 mwl8k_cmd_set_post_scan(hw, bssid);
3192 } 3678 }
@@ -3213,7 +3699,39 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
3213 3699
3214static int mwl8k_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 3700static int mwl8k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3215{ 3701{
3216 return mwl8k_rts_threshold(hw, MWL8K_CMD_SET, value); 3702 return mwl8k_cmd_set_rts_threshold(hw, value);
3703}
3704
3705static int mwl8k_sta_remove(struct ieee80211_hw *hw,
3706 struct ieee80211_vif *vif,
3707 struct ieee80211_sta *sta)
3708{
3709 struct mwl8k_priv *priv = hw->priv;
3710
3711 if (priv->ap_fw)
3712 return mwl8k_cmd_set_new_stn_del(hw, vif, sta->addr);
3713 else
3714 return mwl8k_cmd_update_stadb_del(hw, vif, sta->addr);
3715}
3716
3717static int mwl8k_sta_add(struct ieee80211_hw *hw,
3718 struct ieee80211_vif *vif,
3719 struct ieee80211_sta *sta)
3720{
3721 struct mwl8k_priv *priv = hw->priv;
3722 int ret;
3723
3724 if (!priv->ap_fw) {
3725 ret = mwl8k_cmd_update_stadb_add(hw, vif, sta);
3726 if (ret >= 0) {
3727 MWL8K_STA(sta)->peer_id = ret;
3728 return 0;
3729 }
3730
3731 return ret;
3732 }
3733
3734 return mwl8k_cmd_set_new_stn_add(hw, vif, sta);
3217} 3735}
3218 3736
3219static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue, 3737static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
@@ -3225,14 +3743,14 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
3225 rc = mwl8k_fw_lock(hw); 3743 rc = mwl8k_fw_lock(hw);
3226 if (!rc) { 3744 if (!rc) {
3227 if (!priv->wmm_enabled) 3745 if (!priv->wmm_enabled)
3228 rc = mwl8k_set_wmm(hw, 1); 3746 rc = mwl8k_cmd_set_wmm_mode(hw, 1);
3229 3747
3230 if (!rc) 3748 if (!rc)
3231 rc = mwl8k_set_edca_params(hw, queue, 3749 rc = mwl8k_cmd_set_edca_params(hw, queue,
3232 params->cw_min, 3750 params->cw_min,
3233 params->cw_max, 3751 params->cw_max,
3234 params->aifs, 3752 params->aifs,
3235 params->txop); 3753 params->txop);
3236 3754
3237 mwl8k_fw_unlock(hw); 3755 mwl8k_fw_unlock(hw);
3238 } 3756 }
@@ -3240,28 +3758,26 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
3240 return rc; 3758 return rc;
3241} 3759}
3242 3760
3243static int mwl8k_get_tx_stats(struct ieee80211_hw *hw, 3761static int mwl8k_get_stats(struct ieee80211_hw *hw,
3244 struct ieee80211_tx_queue_stats *stats) 3762 struct ieee80211_low_level_stats *stats)
3245{ 3763{
3246 struct mwl8k_priv *priv = hw->priv; 3764 return mwl8k_cmd_get_stat(hw, stats);
3247 struct mwl8k_tx_queue *txq;
3248 int index;
3249
3250 spin_lock_bh(&priv->tx_lock);
3251 for (index = 0; index < MWL8K_TX_QUEUES; index++) {
3252 txq = priv->txq + index;
3253 memcpy(&stats[index], &txq->stats,
3254 sizeof(struct ieee80211_tx_queue_stats));
3255 }
3256 spin_unlock_bh(&priv->tx_lock);
3257
3258 return 0;
3259} 3765}
3260 3766
3261static int mwl8k_get_stats(struct ieee80211_hw *hw, 3767static int
3262 struct ieee80211_low_level_stats *stats) 3768mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3769 enum ieee80211_ampdu_mlme_action action,
3770 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
3263{ 3771{
3264 return mwl8k_cmd_802_11_get_stat(hw, stats); 3772 switch (action) {
3773 case IEEE80211_AMPDU_RX_START:
3774 case IEEE80211_AMPDU_RX_STOP:
3775 if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
3776 return -ENOTSUPP;
3777 return 0;
3778 default:
3779 return -ENOTSUPP;
3780 }
3265} 3781}
3266 3782
3267static const struct ieee80211_ops mwl8k_ops = { 3783static const struct ieee80211_ops mwl8k_ops = {
@@ -3275,67 +3791,72 @@ static const struct ieee80211_ops mwl8k_ops = {
3275 .prepare_multicast = mwl8k_prepare_multicast, 3791 .prepare_multicast = mwl8k_prepare_multicast,
3276 .configure_filter = mwl8k_configure_filter, 3792 .configure_filter = mwl8k_configure_filter,
3277 .set_rts_threshold = mwl8k_set_rts_threshold, 3793 .set_rts_threshold = mwl8k_set_rts_threshold,
3794 .sta_add = mwl8k_sta_add,
3795 .sta_remove = mwl8k_sta_remove,
3278 .conf_tx = mwl8k_conf_tx, 3796 .conf_tx = mwl8k_conf_tx,
3279 .get_tx_stats = mwl8k_get_tx_stats,
3280 .get_stats = mwl8k_get_stats, 3797 .get_stats = mwl8k_get_stats,
3798 .ampdu_action = mwl8k_ampdu_action,
3281}; 3799};
3282 3800
3283static void mwl8k_tx_reclaim_handler(unsigned long data)
3284{
3285 int i;
3286 struct ieee80211_hw *hw = (struct ieee80211_hw *) data;
3287 struct mwl8k_priv *priv = hw->priv;
3288
3289 spin_lock_bh(&priv->tx_lock);
3290 for (i = 0; i < MWL8K_TX_QUEUES; i++)
3291 mwl8k_txq_reclaim(hw, i, 0);
3292
3293 if (priv->tx_wait != NULL && !priv->pending_tx_pkts) {
3294 complete(priv->tx_wait);
3295 priv->tx_wait = NULL;
3296 }
3297 spin_unlock_bh(&priv->tx_lock);
3298}
3299
3300static void mwl8k_finalize_join_worker(struct work_struct *work) 3801static void mwl8k_finalize_join_worker(struct work_struct *work)
3301{ 3802{
3302 struct mwl8k_priv *priv = 3803 struct mwl8k_priv *priv =
3303 container_of(work, struct mwl8k_priv, finalize_join_worker); 3804 container_of(work, struct mwl8k_priv, finalize_join_worker);
3304 struct sk_buff *skb = priv->beacon_skb; 3805 struct sk_buff *skb = priv->beacon_skb;
3305 u8 dtim = MWL8K_VIF(priv->vif)->bss_info.dtim_period; 3806 struct ieee80211_mgmt *mgmt = (void *)skb->data;
3807 int len = skb->len - offsetof(struct ieee80211_mgmt, u.beacon.variable);
3808 const u8 *tim = cfg80211_find_ie(WLAN_EID_TIM,
3809 mgmt->u.beacon.variable, len);
3810 int dtim_period = 1;
3306 3811
3307 mwl8k_finalize_join(priv->hw, skb->data, skb->len, dtim); 3812 if (tim && tim[1] >= 2)
3308 dev_kfree_skb(skb); 3813 dtim_period = tim[3];
3814
3815 mwl8k_cmd_finalize_join(priv->hw, skb->data, skb->len, dtim_period);
3309 3816
3817 dev_kfree_skb(skb);
3310 priv->beacon_skb = NULL; 3818 priv->beacon_skb = NULL;
3311} 3819}
3312 3820
3313enum { 3821enum {
3314 MWL8687 = 0, 3822 MWL8363 = 0,
3823 MWL8687,
3315 MWL8366, 3824 MWL8366,
3316}; 3825};
3317 3826
3318static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = { 3827static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = {
3319 { 3828 [MWL8363] = {
3829 .part_name = "88w8363",
3830 .helper_image = "mwl8k/helper_8363.fw",
3831 .fw_image = "mwl8k/fmimage_8363.fw",
3832 },
3833 [MWL8687] = {
3320 .part_name = "88w8687", 3834 .part_name = "88w8687",
3321 .helper_image = "mwl8k/helper_8687.fw", 3835 .helper_image = "mwl8k/helper_8687.fw",
3322 .fw_image = "mwl8k/fmimage_8687.fw", 3836 .fw_image = "mwl8k/fmimage_8687.fw",
3323 .rxd_ops = &rxd_8687_ops,
3324 .modes = BIT(NL80211_IFTYPE_STATION),
3325 }, 3837 },
3326 { 3838 [MWL8366] = {
3327 .part_name = "88w8366", 3839 .part_name = "88w8366",
3328 .helper_image = "mwl8k/helper_8366.fw", 3840 .helper_image = "mwl8k/helper_8366.fw",
3329 .fw_image = "mwl8k/fmimage_8366.fw", 3841 .fw_image = "mwl8k/fmimage_8366.fw",
3330 .rxd_ops = &rxd_8366_ops, 3842 .ap_rxd_ops = &rxd_8366_ap_ops,
3331 .modes = 0,
3332 }, 3843 },
3333}; 3844};
3334 3845
3846MODULE_FIRMWARE("mwl8k/helper_8363.fw");
3847MODULE_FIRMWARE("mwl8k/fmimage_8363.fw");
3848MODULE_FIRMWARE("mwl8k/helper_8687.fw");
3849MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
3850MODULE_FIRMWARE("mwl8k/helper_8366.fw");
3851MODULE_FIRMWARE("mwl8k/fmimage_8366.fw");
3852
3335static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = { 3853static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
3854 { PCI_VDEVICE(MARVELL, 0x2a0c), .driver_data = MWL8363, },
3855 { PCI_VDEVICE(MARVELL, 0x2a24), .driver_data = MWL8363, },
3336 { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, }, 3856 { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, },
3337 { PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = MWL8687, }, 3857 { PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = MWL8687, },
3338 { PCI_VDEVICE(MARVELL, 0x2a40), .driver_data = MWL8366, }, 3858 { PCI_VDEVICE(MARVELL, 0x2a40), .driver_data = MWL8366, },
3859 { PCI_VDEVICE(MARVELL, 0x2a43), .driver_data = MWL8366, },
3339 { }, 3860 { },
3340}; 3861};
3341MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table); 3862MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table);
@@ -3354,6 +3875,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3354 printed_version = 1; 3875 printed_version = 1;
3355 } 3876 }
3356 3877
3878
3357 rc = pci_enable_device(pdev); 3879 rc = pci_enable_device(pdev);
3358 if (rc) { 3880 if (rc) {
3359 printk(KERN_ERR "%s: Cannot enable new PCI device\n", 3881 printk(KERN_ERR "%s: Cannot enable new PCI device\n",
@@ -3370,6 +3892,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3370 3892
3371 pci_set_master(pdev); 3893 pci_set_master(pdev);
3372 3894
3895
3373 hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops); 3896 hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops);
3374 if (hw == NULL) { 3897 if (hw == NULL) {
3375 printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME); 3898 printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME);
@@ -3377,17 +3900,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3377 goto err_free_reg; 3900 goto err_free_reg;
3378 } 3901 }
3379 3902
3903 SET_IEEE80211_DEV(hw, &pdev->dev);
3904 pci_set_drvdata(pdev, hw);
3905
3380 priv = hw->priv; 3906 priv = hw->priv;
3381 priv->hw = hw; 3907 priv->hw = hw;
3382 priv->pdev = pdev; 3908 priv->pdev = pdev;
3383 priv->device_info = &mwl8k_info_tbl[id->driver_data]; 3909 priv->device_info = &mwl8k_info_tbl[id->driver_data];
3384 priv->rxd_ops = priv->device_info->rxd_ops;
3385 priv->sniffer_enabled = false;
3386 priv->wmm_enabled = false;
3387 priv->pending_tx_pkts = 0;
3388 3910
3389 SET_IEEE80211_DEV(hw, &pdev->dev);
3390 pci_set_drvdata(pdev, hw);
3391 3911
3392 priv->sram = pci_iomap(pdev, 0, 0x10000); 3912 priv->sram = pci_iomap(pdev, 0, 0x10000);
3393 if (priv->sram == NULL) { 3913 if (priv->sram == NULL) {
@@ -3410,16 +3930,46 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3410 } 3930 }
3411 } 3931 }
3412 3932
3413 memcpy(priv->channels, mwl8k_channels, sizeof(mwl8k_channels));
3414 priv->band.band = IEEE80211_BAND_2GHZ;
3415 priv->band.channels = priv->channels;
3416 priv->band.n_channels = ARRAY_SIZE(mwl8k_channels);
3417 priv->band.bitrates = priv->rates;
3418 priv->band.n_bitrates = ARRAY_SIZE(mwl8k_rates);
3419 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
3420 3933
3421 BUILD_BUG_ON(sizeof(priv->rates) != sizeof(mwl8k_rates)); 3934 /* Reset firmware and hardware */
3422 memcpy(priv->rates, mwl8k_rates, sizeof(mwl8k_rates)); 3935 mwl8k_hw_reset(priv);
3936
3937 /* Ask userland hotplug daemon for the device firmware */
3938 rc = mwl8k_request_firmware(priv);
3939 if (rc) {
3940 printk(KERN_ERR "%s: Firmware files not found\n",
3941 wiphy_name(hw->wiphy));
3942 goto err_stop_firmware;
3943 }
3944
3945 /* Load firmware into hardware */
3946 rc = mwl8k_load_firmware(hw);
3947 if (rc) {
3948 printk(KERN_ERR "%s: Cannot start firmware\n",
3949 wiphy_name(hw->wiphy));
3950 goto err_stop_firmware;
3951 }
3952
3953 /* Reclaim memory once firmware is successfully loaded */
3954 mwl8k_release_firmware(priv);
3955
3956
3957 if (priv->ap_fw) {
3958 priv->rxd_ops = priv->device_info->ap_rxd_ops;
3959 if (priv->rxd_ops == NULL) {
3960 printk(KERN_ERR "%s: Driver does not have AP "
3961 "firmware image support for this hardware\n",
3962 wiphy_name(hw->wiphy));
3963 goto err_stop_firmware;
3964 }
3965 } else {
3966 priv->rxd_ops = &rxd_sta_ops;
3967 }
3968
3969 priv->sniffer_enabled = false;
3970 priv->wmm_enabled = false;
3971 priv->pending_tx_pkts = 0;
3972
3423 3973
3424 /* 3974 /*
3425 * Extra headroom is the size of the required DMA header 3975 * Extra headroom is the size of the required DMA header
@@ -3432,12 +3982,13 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3432 3982
3433 hw->queues = MWL8K_TX_QUEUES; 3983 hw->queues = MWL8K_TX_QUEUES;
3434 3984
3435 hw->wiphy->interface_modes = priv->device_info->modes;
3436
3437 /* Set rssi and noise values to dBm */ 3985 /* Set rssi and noise values to dBm */
3438 hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM; 3986 hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM;
3439 hw->vif_data_size = sizeof(struct mwl8k_vif); 3987 hw->vif_data_size = sizeof(struct mwl8k_vif);
3440 priv->vif = NULL; 3988 hw->sta_data_size = sizeof(struct mwl8k_sta);
3989
3990 priv->macids_used = 0;
3991 INIT_LIST_HEAD(&priv->vif_list);
3441 3992
3442 /* Set default radio state and preamble */ 3993 /* Set default radio state and preamble */
3443 priv->radio_on = 0; 3994 priv->radio_on = 0;
@@ -3446,19 +3997,20 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3446 /* Finalize join worker */ 3997 /* Finalize join worker */
3447 INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker); 3998 INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
3448 3999
3449 /* TX reclaim tasklet */ 4000 /* TX reclaim and RX tasklets. */
3450 tasklet_init(&priv->tx_reclaim_task, 4001 tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
3451 mwl8k_tx_reclaim_handler, (unsigned long)hw); 4002 tasklet_disable(&priv->poll_tx_task);
3452 tasklet_disable(&priv->tx_reclaim_task); 4003 tasklet_init(&priv->poll_rx_task, mwl8k_rx_poll, (unsigned long)hw);
4004 tasklet_disable(&priv->poll_rx_task);
3453 4005
3454 /* Power management cookie */ 4006 /* Power management cookie */
3455 priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma); 4007 priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma);
3456 if (priv->cookie == NULL) 4008 if (priv->cookie == NULL)
3457 goto err_iounmap; 4009 goto err_stop_firmware;
3458 4010
3459 rc = mwl8k_rxq_init(hw, 0); 4011 rc = mwl8k_rxq_init(hw, 0);
3460 if (rc) 4012 if (rc)
3461 goto err_iounmap; 4013 goto err_free_cookie;
3462 rxq_refill(hw, 0, INT_MAX); 4014 rxq_refill(hw, 0, INT_MAX);
3463 4015
3464 mutex_init(&priv->fw_mutex); 4016 mutex_init(&priv->fw_mutex);
@@ -3478,7 +4030,8 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3478 4030
3479 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS); 4031 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
3480 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 4032 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
3481 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL); 4033 iowrite32(MWL8K_A2H_INT_TX_DONE | MWL8K_A2H_INT_RX_READY,
4034 priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL);
3482 iowrite32(0xffffffff, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK); 4035 iowrite32(0xffffffff, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);
3483 4036
3484 rc = request_irq(priv->pdev->irq, mwl8k_interrupt, 4037 rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
@@ -3489,31 +4042,9 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3489 goto err_free_queues; 4042 goto err_free_queues;
3490 } 4043 }
3491 4044
3492 /* Reset firmware and hardware */
3493 mwl8k_hw_reset(priv);
3494
3495 /* Ask userland hotplug daemon for the device firmware */
3496 rc = mwl8k_request_firmware(priv);
3497 if (rc) {
3498 printk(KERN_ERR "%s: Firmware files not found\n",
3499 wiphy_name(hw->wiphy));
3500 goto err_free_irq;
3501 }
3502
3503 /* Load firmware into hardware */
3504 rc = mwl8k_load_firmware(hw);
3505 if (rc) {
3506 printk(KERN_ERR "%s: Cannot start firmware\n",
3507 wiphy_name(hw->wiphy));
3508 goto err_stop_firmware;
3509 }
3510
3511 /* Reclaim memory once firmware is successfully loaded */
3512 mwl8k_release_firmware(priv);
3513
3514 /* 4045 /*
3515 * Temporarily enable interrupts. Initial firmware host 4046 * Temporarily enable interrupts. Initial firmware host
3516 * commands use interrupts and avoids polling. Disable 4047 * commands use interrupts and avoid polling. Disable
3517 * interrupts when done. 4048 * interrupts when done.
3518 */ 4049 */
3519 iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 4050 iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
@@ -3529,22 +4060,29 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3529 if (rc) { 4060 if (rc) {
3530 printk(KERN_ERR "%s: Cannot initialise firmware\n", 4061 printk(KERN_ERR "%s: Cannot initialise firmware\n",
3531 wiphy_name(hw->wiphy)); 4062 wiphy_name(hw->wiphy));
3532 goto err_stop_firmware; 4063 goto err_free_irq;
3533 } 4064 }
3534 4065
4066 hw->wiphy->interface_modes = 0;
4067 if (priv->ap_macids_supported)
4068 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
4069 if (priv->sta_macids_supported)
4070 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
4071
4072
3535 /* Turn radio off */ 4073 /* Turn radio off */
3536 rc = mwl8k_cmd_802_11_radio_disable(hw); 4074 rc = mwl8k_cmd_radio_disable(hw);
3537 if (rc) { 4075 if (rc) {
3538 printk(KERN_ERR "%s: Cannot disable\n", wiphy_name(hw->wiphy)); 4076 printk(KERN_ERR "%s: Cannot disable\n", wiphy_name(hw->wiphy));
3539 goto err_stop_firmware; 4077 goto err_free_irq;
3540 } 4078 }
3541 4079
3542 /* Clear MAC address */ 4080 /* Clear MAC address */
3543 rc = mwl8k_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00"); 4081 rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00");
3544 if (rc) { 4082 if (rc) {
3545 printk(KERN_ERR "%s: Cannot clear MAC address\n", 4083 printk(KERN_ERR "%s: Cannot clear MAC address\n",
3546 wiphy_name(hw->wiphy)); 4084 wiphy_name(hw->wiphy));
3547 goto err_stop_firmware; 4085 goto err_free_irq;
3548 } 4086 }
3549 4087
3550 /* Disable interrupts */ 4088 /* Disable interrupts */
@@ -3555,7 +4093,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3555 if (rc) { 4093 if (rc) {
3556 printk(KERN_ERR "%s: Cannot register device\n", 4094 printk(KERN_ERR "%s: Cannot register device\n",
3557 wiphy_name(hw->wiphy)); 4095 wiphy_name(hw->wiphy));
3558 goto err_stop_firmware; 4096 goto err_free_queues;
3559 } 4097 }
3560 4098
3561 printk(KERN_INFO "%s: %s v%d, %pM, %s firmware %u.%u.%u.%u\n", 4099 printk(KERN_INFO "%s: %s v%d, %pM, %s firmware %u.%u.%u.%u\n",
@@ -3567,10 +4105,6 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3567 4105
3568 return 0; 4106 return 0;
3569 4107
3570err_stop_firmware:
3571 mwl8k_hw_reset(priv);
3572 mwl8k_release_firmware(priv);
3573
3574err_free_irq: 4108err_free_irq:
3575 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 4109 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
3576 free_irq(priv->pdev->irq, hw); 4110 free_irq(priv->pdev->irq, hw);
@@ -3580,11 +4114,16 @@ err_free_queues:
3580 mwl8k_txq_deinit(hw, i); 4114 mwl8k_txq_deinit(hw, i);
3581 mwl8k_rxq_deinit(hw, 0); 4115 mwl8k_rxq_deinit(hw, 0);
3582 4116
3583err_iounmap: 4117err_free_cookie:
3584 if (priv->cookie != NULL) 4118 if (priv->cookie != NULL)
3585 pci_free_consistent(priv->pdev, 4, 4119 pci_free_consistent(priv->pdev, 4,
3586 priv->cookie, priv->cookie_dma); 4120 priv->cookie, priv->cookie_dma);
3587 4121
4122err_stop_firmware:
4123 mwl8k_hw_reset(priv);
4124 mwl8k_release_firmware(priv);
4125
4126err_iounmap:
3588 if (priv->regs != NULL) 4127 if (priv->regs != NULL)
3589 pci_iounmap(pdev, priv->regs); 4128 pci_iounmap(pdev, priv->regs);
3590 4129
@@ -3622,15 +4161,16 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
3622 4161
3623 ieee80211_unregister_hw(hw); 4162 ieee80211_unregister_hw(hw);
3624 4163
3625 /* Remove tx reclaim tasklet */ 4164 /* Remove TX reclaim and RX tasklets. */
3626 tasklet_kill(&priv->tx_reclaim_task); 4165 tasklet_kill(&priv->poll_tx_task);
4166 tasklet_kill(&priv->poll_rx_task);
3627 4167
3628 /* Stop hardware */ 4168 /* Stop hardware */
3629 mwl8k_hw_reset(priv); 4169 mwl8k_hw_reset(priv);
3630 4170
3631 /* Return all skbs to mac80211 */ 4171 /* Return all skbs to mac80211 */
3632 for (i = 0; i < MWL8K_TX_QUEUES; i++) 4172 for (i = 0; i < MWL8K_TX_QUEUES; i++)
3633 mwl8k_txq_reclaim(hw, i, 1); 4173 mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
3634 4174
3635 for (i = 0; i < MWL8K_TX_QUEUES; i++) 4175 for (i = 0; i < MWL8K_TX_QUEUES; i++)
3636 mwl8k_txq_deinit(hw, i); 4176 mwl8k_txq_deinit(hw, i);
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 404830f47ab2..e6369242e49c 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -1028,7 +1028,7 @@ int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx)
1028} 1028}
1029 1029
1030int __orinoco_hw_set_multicast_list(struct orinoco_private *priv, 1030int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
1031 struct dev_addr_list *mc_list, 1031 struct net_device *dev,
1032 int mc_count, int promisc) 1032 int mc_count, int promisc)
1033{ 1033{
1034 hermes_t *hw = &priv->hw; 1034 hermes_t *hw = &priv->hw;
@@ -1049,24 +1049,16 @@ int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
1049 * group address if either we want to multicast, or if we were 1049 * group address if either we want to multicast, or if we were
1050 * multicasting and want to stop */ 1050 * multicasting and want to stop */
1051 if (!promisc && (mc_count || priv->mc_count)) { 1051 if (!promisc && (mc_count || priv->mc_count)) {
1052 struct dev_mc_list *p = mc_list; 1052 struct dev_mc_list *p;
1053 struct hermes_multicast mclist; 1053 struct hermes_multicast mclist;
1054 int i; 1054 int i = 0;
1055 1055
1056 for (i = 0; i < mc_count; i++) { 1056 netdev_for_each_mc_addr(p, dev) {
1057 /* paranoia: is list shorter than mc_count? */ 1057 if (i == mc_count)
1058 BUG_ON(!p); 1058 break;
1059 /* paranoia: bad address size in list? */ 1059 memcpy(mclist.addr[i++], p->dmi_addr, ETH_ALEN);
1060 BUG_ON(p->dmi_addrlen != ETH_ALEN);
1061
1062 memcpy(mclist.addr[i], p->dmi_addr, ETH_ALEN);
1063 p = p->next;
1064 } 1060 }
1065 1061
1066 if (p)
1067 printk(KERN_WARNING "%s: Multicast list is "
1068 "longer than mc_count\n", priv->ndev->name);
1069
1070 err = hermes_write_ltv(hw, USER_BAP, 1062 err = hermes_write_ltv(hw, USER_BAP,
1071 HERMES_RID_CNFGROUPADDRESSES, 1063 HERMES_RID_CNFGROUPADDRESSES,
1072 HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN), 1064 HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN),
diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/orinoco/hw.h
index e2f7fdc4d45a..9799a1d14a63 100644
--- a/drivers/net/wireless/orinoco/hw.h
+++ b/drivers/net/wireless/orinoco/hw.h
@@ -43,7 +43,7 @@ int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
43 u8 *tsc, size_t tsc_len); 43 u8 *tsc, size_t tsc_len);
44int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx); 44int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx);
45int __orinoco_hw_set_multicast_list(struct orinoco_private *priv, 45int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
46 struct dev_addr_list *mc_list, 46 struct net_device *dev,
47 int mc_count, int promisc); 47 int mc_count, int promisc);
48int orinoco_hw_get_essid(struct orinoco_private *priv, int *active, 48int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
49 char buf[IW_ESSID_MAX_SIZE+1]); 49 char buf[IW_ESSID_MAX_SIZE+1]);
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 753a1804eee7..b42634c614b5 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -1668,16 +1668,15 @@ __orinoco_set_multicast_list(struct net_device *dev)
1668 /* The Hermes doesn't seem to have an allmulti mode, so we go 1668 /* The Hermes doesn't seem to have an allmulti mode, so we go
1669 * into promiscuous mode and let the upper levels deal. */ 1669 * into promiscuous mode and let the upper levels deal. */
1670 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) || 1670 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
1671 (dev->mc_count > MAX_MULTICAST(priv))) { 1671 (netdev_mc_count(dev) > MAX_MULTICAST(priv))) {
1672 promisc = 1; 1672 promisc = 1;
1673 mc_count = 0; 1673 mc_count = 0;
1674 } else { 1674 } else {
1675 promisc = 0; 1675 promisc = 0;
1676 mc_count = dev->mc_count; 1676 mc_count = netdev_mc_count(dev);
1677 } 1677 }
1678 1678
1679 err = __orinoco_hw_set_multicast_list(priv, dev->mc_list, mc_count, 1679 err = __orinoco_hw_set_multicast_list(priv, dev, mc_count, promisc);
1680 promisc);
1681 1680
1682 return err; 1681 return err;
1683} 1682}
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index f27bb8367c98..1d4ada188eda 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -407,7 +407,6 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
407 PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3), 407 PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3),
408 PCMCIA_DEVICE_PROD_ID12("ACTIONTEC", "PRISM Wireless LAN PC Card", 0x393089da, 0xa71e69d5), 408 PCMCIA_DEVICE_PROD_ID12("ACTIONTEC", "PRISM Wireless LAN PC Card", 0x393089da, 0xa71e69d5),
409 PCMCIA_DEVICE_PROD_ID12("Addtron", "AWP-100 Wireless PCMCIA", 0xe6ec52ce, 0x08649af2), 409 PCMCIA_DEVICE_PROD_ID12("Addtron", "AWP-100 Wireless PCMCIA", 0xe6ec52ce, 0x08649af2),
410 PCMCIA_DEVICE_PROD_ID123("AIRVAST", "IEEE 802.11b Wireless PCMCIA Card", "HFA3863", 0xea569531, 0x4bcb9645, 0x355cb092),
411 PCMCIA_DEVICE_PROD_ID12("Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", 0x5cd01705, 0x4271660f), 410 PCMCIA_DEVICE_PROD_ID12("Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", 0x5cd01705, 0x4271660f),
412 PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11b_PC_CARD_25", 0x78fc06ee, 0xdb9aa842), 411 PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11b_PC_CARD_25", 0x78fc06ee, 0xdb9aa842),
413 PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11B_CF_CARD_25", 0x78fc06ee, 0x45a50c1e), 412 PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11B_CF_CARD_25", 0x78fc06ee, 0x45a50c1e),
@@ -417,7 +416,6 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
417 PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G", 0x2decece3, 0x82067c18), 416 PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G", 0x2decece3, 0x82067c18),
418 PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90), 417 PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90),
419 PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card", 0x54f7c49c, 0x15a75e5b), 418 PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card", 0x54f7c49c, 0x15a75e5b),
420 PCMCIA_DEVICE_PROD_ID123("corega", "WL PCCL-11", "ISL37300P", 0x0a21501a, 0x59868926, 0xc9049a39),
421 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCC-11", 0x5261440f, 0xa6405584), 419 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCC-11", 0x5261440f, 0xa6405584),
422 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCCA-11", 0x5261440f, 0xdf6115f9), 420 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCCA-11", 0x5261440f, 0xdf6115f9),
423 PCMCIA_DEVICE_PROD_ID12("corega_K.K.", "Wireless_LAN_PCCB-11", 0x29e33311, 0xee7a27ae), 421 PCMCIA_DEVICE_PROD_ID12("corega_K.K.", "Wireless_LAN_PCCB-11", 0x29e33311, 0xee7a27ae),
@@ -432,7 +430,6 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
432 PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE", 0x74c5e40d, 0xdb472a18), 430 PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE", 0x74c5e40d, 0xdb472a18),
433 PCMCIA_DEVICE_PROD_ID12("INTERSIL", "I-GATE 11M PC Card / PC Card plus", 0x74c5e40d, 0x8304ff77), 431 PCMCIA_DEVICE_PROD_ID12("INTERSIL", "I-GATE 11M PC Card / PC Card plus", 0x74c5e40d, 0x8304ff77),
434 PCMCIA_DEVICE_PROD_ID12("Intersil", "PRISM 2_5 PCMCIA ADAPTER", 0x4b801a17, 0x6345a0bf), 432 PCMCIA_DEVICE_PROD_ID12("Intersil", "PRISM 2_5 PCMCIA ADAPTER", 0x4b801a17, 0x6345a0bf),
435 PCMCIA_DEVICE_PROD_ID123("Intersil", "PRISM Freedom PCMCIA Adapter", "ISL37100P", 0x4b801a17, 0xf222ec2d, 0x630d52b2),
436 PCMCIA_DEVICE_PROD_ID12("LeArtery", "SYNCBYAIR 11Mbps Wireless LAN PC Card", 0x7e3b326a, 0x49893e92), 433 PCMCIA_DEVICE_PROD_ID12("LeArtery", "SYNCBYAIR 11Mbps Wireless LAN PC Card", 0x7e3b326a, 0x49893e92),
437 PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card", 0x0733cc81, 0x0c52f395), 434 PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card", 0x0733cc81, 0x0c52f395),
438 PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a), 435 PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a),
@@ -445,7 +442,6 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
445 PCMCIA_DEVICE_PROD_ID12("Nortel Networks", "emobility 802.11 Wireless LAN PC Card", 0x2d617ea0, 0x88cd5767), 442 PCMCIA_DEVICE_PROD_ID12("Nortel Networks", "emobility 802.11 Wireless LAN PC Card", 0x2d617ea0, 0x88cd5767),
446 PCMCIA_DEVICE_PROD_ID12("OEM", "PRISM2 IEEE 802.11 PC-Card", 0xfea54c90, 0x48f2bdd6), 443 PCMCIA_DEVICE_PROD_ID12("OEM", "PRISM2 IEEE 802.11 PC-Card", 0xfea54c90, 0x48f2bdd6),
447 PCMCIA_DEVICE_PROD_ID12("OTC", "Wireless AirEZY 2411-PCC WLAN Card", 0x4ac44287, 0x235a6bed), 444 PCMCIA_DEVICE_PROD_ID12("OTC", "Wireless AirEZY 2411-PCC WLAN Card", 0x4ac44287, 0x235a6bed),
448 PCMCIA_DEVICE_PROD_ID123("PCMCIA", "11M WLAN Card v2.5", "ISL37300P", 0x281f1c5d, 0x6e440487, 0xc9049a39),
449 PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-CF110", 0x209f40ab, 0xd9715264), 445 PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-CF110", 0x209f40ab, 0xd9715264),
450 PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-NS110", 0x209f40ab, 0x46263178), 446 PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-NS110", 0x209f40ab, 0x46263178),
451 PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9), 447 PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9),
@@ -454,8 +450,11 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
454 PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2532W-B EliteConnect Wireless Adapter", 0xc4f8b18b, 0x196bd757), 450 PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2532W-B EliteConnect Wireless Adapter", 0xc4f8b18b, 0x196bd757),
455 PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2632W", 0xc4f8b18b, 0x474a1f2a), 451 PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2632W", 0xc4f8b18b, 0x474a1f2a),
456 PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e), 452 PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
457 PCMCIA_DEVICE_PROD_ID123("The Linksys Group, Inc.", "Instant Wireless Network PC Card", "ISL37300P", 0xa5f472c2, 0x590eb502, 0xc9049a39),
458 PCMCIA_DEVICE_PROD_ID12("ZoomAir 11Mbps High", "Rate wireless Networking", 0x273fe3db, 0x32a1eaee), 453 PCMCIA_DEVICE_PROD_ID12("ZoomAir 11Mbps High", "Rate wireless Networking", 0x273fe3db, 0x32a1eaee),
454 PCMCIA_DEVICE_PROD_ID3("HFA3863", 0x355cb092),
455 PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2),
456 PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b),
457 PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39),
459 PCMCIA_DEVICE_NULL, 458 PCMCIA_DEVICE_NULL,
460}; 459};
461MODULE_DEVICE_TABLE(pcmcia, orinoco_cs_ids); 460MODULE_DEVICE_TABLE(pcmcia, orinoco_cs_ids);
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index c13a4c383410..075f446b3139 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -274,7 +274,7 @@ static void __devexit orinoco_nortel_remove_one(struct pci_dev *pdev)
274 pci_disable_device(pdev); 274 pci_disable_device(pdev);
275} 275}
276 276
277static struct pci_device_id orinoco_nortel_id_table[] = { 277static DEFINE_PCI_DEVICE_TABLE(orinoco_nortel_id_table) = {
278 /* Nortel emobility PCI */ 278 /* Nortel emobility PCI */
279 {0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,}, 279 {0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,},
280 /* Symbol LA-4123 PCI */ 280 /* Symbol LA-4123 PCI */
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index fea7781948e7..bda5317cc596 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -212,7 +212,7 @@ static void __devexit orinoco_pci_remove_one(struct pci_dev *pdev)
212 pci_disable_device(pdev); 212 pci_disable_device(pdev);
213} 213}
214 214
215static struct pci_device_id orinoco_pci_id_table[] = { 215static DEFINE_PCI_DEVICE_TABLE(orinoco_pci_id_table) = {
216 /* Intersil Prism 3 */ 216 /* Intersil Prism 3 */
217 {0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID,}, 217 {0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID,},
218 /* Intersil Prism 2.5 */ 218 /* Intersil Prism 2.5 */
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index 3f2942a1e4f5..e0d5874ab42f 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -310,7 +310,7 @@ static void __devexit orinoco_plx_remove_one(struct pci_dev *pdev)
310 pci_disable_device(pdev); 310 pci_disable_device(pdev);
311} 311}
312 312
313static struct pci_device_id orinoco_plx_id_table[] = { 313static DEFINE_PCI_DEVICE_TABLE(orinoco_plx_id_table) = {
314 {0x111a, 0x1023, PCI_ANY_ID, PCI_ANY_ID,}, /* Siemens SpeedStream SS1023 */ 314 {0x111a, 0x1023, PCI_ANY_ID, PCI_ANY_ID,}, /* Siemens SpeedStream SS1023 */
315 {0x1385, 0x4100, PCI_ANY_ID, PCI_ANY_ID,}, /* Netgear MA301 */ 315 {0x1385, 0x4100, PCI_ANY_ID, PCI_ANY_ID,}, /* Netgear MA301 */
316 {0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? */ 316 {0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? */
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index d3452548cc71..88cbc7902aa0 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -203,7 +203,7 @@ static void __devexit orinoco_tmd_remove_one(struct pci_dev *pdev)
203 pci_disable_device(pdev); 203 pci_disable_device(pdev);
204} 204}
205 205
206static struct pci_device_id orinoco_tmd_id_table[] = { 206static DEFINE_PCI_DEVICE_TABLE(orinoco_tmd_id_table) = {
207 {0x15e8, 0x0131, PCI_ANY_ID, PCI_ANY_ID,}, /* NDC and OEMs, e.g. pheecom */ 207 {0x15e8, 0x0131, PCI_ANY_ID, PCI_ANY_ID,}, /* NDC and OEMs, e.g. pheecom */
208 {0,}, 208 {0,},
209}; 209};
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 18012dbfb45d..4f752a21495f 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -33,21 +33,29 @@ MODULE_DESCRIPTION("Softmac Prism54 common code");
33MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
34MODULE_ALIAS("prism54common"); 34MODULE_ALIAS("prism54common");
35 35
36static int p54_sta_add_remove(struct ieee80211_hw *hw,
37 struct ieee80211_vif *vif,
38 struct ieee80211_sta *sta)
39{
40 struct p54_common *priv = hw->priv;
41
42 /*
43 * Notify the firmware that we don't want or we don't
44 * need to buffer frames for this station anymore.
45 */
46
47 p54_sta_unlock(priv, sta->addr);
48
49 return 0;
50}
51
36static void p54_sta_notify(struct ieee80211_hw *dev, struct ieee80211_vif *vif, 52static void p54_sta_notify(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
37 enum sta_notify_cmd notify_cmd, 53 enum sta_notify_cmd notify_cmd,
38 struct ieee80211_sta *sta) 54 struct ieee80211_sta *sta)
39{ 55{
40 struct p54_common *priv = dev->priv; 56 struct p54_common *priv = dev->priv;
41 switch (notify_cmd) {
42 case STA_NOTIFY_ADD:
43 case STA_NOTIFY_REMOVE:
44 /*
45 * Notify the firmware that we don't want or we don't
46 * need to buffer frames for this station anymore.
47 */
48 57
49 p54_sta_unlock(priv, sta->addr); 58 switch (notify_cmd) {
50 break;
51 case STA_NOTIFY_AWAKE: 59 case STA_NOTIFY_AWAKE:
52 /* update the firmware's filter table */ 60 /* update the firmware's filter table */
53 p54_sta_unlock(priv, sta->addr); 61 p54_sta_unlock(priv, sta->addr);
@@ -216,7 +224,7 @@ static void p54_stop(struct ieee80211_hw *dev)
216} 224}
217 225
218static int p54_add_interface(struct ieee80211_hw *dev, 226static int p54_add_interface(struct ieee80211_hw *dev,
219 struct ieee80211_if_init_conf *conf) 227 struct ieee80211_vif *vif)
220{ 228{
221 struct p54_common *priv = dev->priv; 229 struct p54_common *priv = dev->priv;
222 230
@@ -226,28 +234,28 @@ static int p54_add_interface(struct ieee80211_hw *dev,
226 return -EOPNOTSUPP; 234 return -EOPNOTSUPP;
227 } 235 }
228 236
229 priv->vif = conf->vif; 237 priv->vif = vif;
230 238
231 switch (conf->type) { 239 switch (vif->type) {
232 case NL80211_IFTYPE_STATION: 240 case NL80211_IFTYPE_STATION:
233 case NL80211_IFTYPE_ADHOC: 241 case NL80211_IFTYPE_ADHOC:
234 case NL80211_IFTYPE_AP: 242 case NL80211_IFTYPE_AP:
235 case NL80211_IFTYPE_MESH_POINT: 243 case NL80211_IFTYPE_MESH_POINT:
236 priv->mode = conf->type; 244 priv->mode = vif->type;
237 break; 245 break;
238 default: 246 default:
239 mutex_unlock(&priv->conf_mutex); 247 mutex_unlock(&priv->conf_mutex);
240 return -EOPNOTSUPP; 248 return -EOPNOTSUPP;
241 } 249 }
242 250
243 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN); 251 memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
244 p54_setup_mac(priv); 252 p54_setup_mac(priv);
245 mutex_unlock(&priv->conf_mutex); 253 mutex_unlock(&priv->conf_mutex);
246 return 0; 254 return 0;
247} 255}
248 256
249static void p54_remove_interface(struct ieee80211_hw *dev, 257static void p54_remove_interface(struct ieee80211_hw *dev,
250 struct ieee80211_if_init_conf *conf) 258 struct ieee80211_vif *vif)
251{ 259{
252 struct p54_common *priv = dev->priv; 260 struct p54_common *priv = dev->priv;
253 261
@@ -358,16 +366,6 @@ static int p54_get_stats(struct ieee80211_hw *dev,
358 return 0; 366 return 0;
359} 367}
360 368
361static int p54_get_tx_stats(struct ieee80211_hw *dev,
362 struct ieee80211_tx_queue_stats *stats)
363{
364 struct p54_common *priv = dev->priv;
365
366 memcpy(stats, &priv->tx_stats[P54_QUEUE_DATA],
367 sizeof(stats[0]) * dev->queues);
368 return 0;
369}
370
371static void p54_bss_info_changed(struct ieee80211_hw *dev, 369static void p54_bss_info_changed(struct ieee80211_hw *dev,
372 struct ieee80211_vif *vif, 370 struct ieee80211_vif *vif,
373 struct ieee80211_bss_conf *info, 371 struct ieee80211_bss_conf *info,
@@ -516,13 +514,14 @@ static const struct ieee80211_ops p54_ops = {
516 .remove_interface = p54_remove_interface, 514 .remove_interface = p54_remove_interface,
517 .set_tim = p54_set_tim, 515 .set_tim = p54_set_tim,
518 .sta_notify = p54_sta_notify, 516 .sta_notify = p54_sta_notify,
517 .sta_add = p54_sta_add_remove,
518 .sta_remove = p54_sta_add_remove,
519 .set_key = p54_set_key, 519 .set_key = p54_set_key,
520 .config = p54_config, 520 .config = p54_config,
521 .bss_info_changed = p54_bss_info_changed, 521 .bss_info_changed = p54_bss_info_changed,
522 .configure_filter = p54_configure_filter, 522 .configure_filter = p54_configure_filter,
523 .conf_tx = p54_conf_tx, 523 .conf_tx = p54_conf_tx,
524 .get_stats = p54_get_stats, 524 .get_stats = p54_get_stats,
525 .get_tx_stats = p54_get_tx_stats
526}; 525};
527 526
528struct ieee80211_hw *p54_init_common(size_t priv_data_len) 527struct ieee80211_hw *p54_init_common(size_t priv_data_len)
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index 1afc39410e85..43a3b2ead81a 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -157,6 +157,12 @@ struct p54_led_dev {
157 157
158#endif /* CONFIG_P54_LEDS */ 158#endif /* CONFIG_P54_LEDS */
159 159
160struct p54_tx_queue_stats {
161 unsigned int len;
162 unsigned int limit;
163 unsigned int count;
164};
165
160struct p54_common { 166struct p54_common {
161 struct ieee80211_hw *hw; 167 struct ieee80211_hw *hw;
162 struct ieee80211_vif *vif; 168 struct ieee80211_vif *vif;
@@ -183,7 +189,7 @@ struct p54_common {
183 /* (e)DCF / QOS state */ 189 /* (e)DCF / QOS state */
184 bool use_short_slot; 190 bool use_short_slot;
185 spinlock_t tx_stats_lock; 191 spinlock_t tx_stats_lock;
186 struct ieee80211_tx_queue_stats tx_stats[8]; 192 struct p54_tx_queue_stats tx_stats[8];
187 struct p54_edcf_queue_param qos_params[8]; 193 struct p54_edcf_queue_param qos_params[8];
188 194
189 /* Radio data */ 195 /* Radio data */
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index a72f7c2577de..ed4bdffdd63e 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -31,7 +31,7 @@ MODULE_LICENSE("GPL");
31MODULE_ALIAS("prism54pci"); 31MODULE_ALIAS("prism54pci");
32MODULE_FIRMWARE("isl3886pci"); 32MODULE_FIRMWARE("isl3886pci");
33 33
34static struct pci_device_id p54p_table[] __devinitdata = { 34static DEFINE_PCI_DEVICE_TABLE(p54p_table) = {
35 /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */ 35 /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
36 { PCI_DEVICE(0x1260, 0x3890) }, 36 { PCI_DEVICE(0x1260, 0x3890) },
37 /* 3COM 3CRWE154G72 Wireless LAN adapter */ 37 /* 3COM 3CRWE154G72 Wireless LAN adapter */
@@ -157,6 +157,14 @@ static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
157 skb_tail_pointer(skb), 157 skb_tail_pointer(skb),
158 priv->common.rx_mtu + 32, 158 priv->common.rx_mtu + 32,
159 PCI_DMA_FROMDEVICE); 159 PCI_DMA_FROMDEVICE);
160
161 if (pci_dma_mapping_error(priv->pdev, mapping)) {
162 dev_kfree_skb_any(skb);
163 dev_err(&priv->pdev->dev,
164 "RX DMA Mapping error\n");
165 break;
166 }
167
160 desc->host_addr = cpu_to_le32(mapping); 168 desc->host_addr = cpu_to_le32(mapping);
161 desc->device_addr = 0; // FIXME: necessary? 169 desc->device_addr = 0; // FIXME: necessary?
162 desc->len = cpu_to_le16(priv->common.rx_mtu + 32); 170 desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
@@ -226,14 +234,14 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
226 p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf); 234 p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf);
227} 235}
228 236
229/* caller must hold priv->lock */
230static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index, 237static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
231 int ring_index, struct p54p_desc *ring, u32 ring_limit, 238 int ring_index, struct p54p_desc *ring, u32 ring_limit,
232 void **tx_buf) 239 struct sk_buff **tx_buf)
233{ 240{
234 struct p54p_priv *priv = dev->priv; 241 struct p54p_priv *priv = dev->priv;
235 struct p54p_ring_control *ring_control = priv->ring_control; 242 struct p54p_ring_control *ring_control = priv->ring_control;
236 struct p54p_desc *desc; 243 struct p54p_desc *desc;
244 struct sk_buff *skb;
237 u32 idx, i; 245 u32 idx, i;
238 246
239 i = (*index) % ring_limit; 247 i = (*index) % ring_limit;
@@ -242,9 +250,8 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
242 250
243 while (i != idx) { 251 while (i != idx) {
244 desc = &ring[i]; 252 desc = &ring[i];
245 if (tx_buf[i]) 253
246 if (FREE_AFTER_TX((struct sk_buff *) tx_buf[i])) 254 skb = tx_buf[i];
247 p54_free_skb(dev, tx_buf[i]);
248 tx_buf[i] = NULL; 255 tx_buf[i] = NULL;
249 256
250 pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr), 257 pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr),
@@ -255,17 +262,28 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
255 desc->len = 0; 262 desc->len = 0;
256 desc->flags = 0; 263 desc->flags = 0;
257 264
265 if (skb && FREE_AFTER_TX(skb))
266 p54_free_skb(dev, skb);
267
258 i++; 268 i++;
259 i %= ring_limit; 269 i %= ring_limit;
260 } 270 }
261} 271}
262 272
263static void p54p_rx_tasklet(unsigned long dev_id) 273static void p54p_tasklet(unsigned long dev_id)
264{ 274{
265 struct ieee80211_hw *dev = (struct ieee80211_hw *)dev_id; 275 struct ieee80211_hw *dev = (struct ieee80211_hw *)dev_id;
266 struct p54p_priv *priv = dev->priv; 276 struct p54p_priv *priv = dev->priv;
267 struct p54p_ring_control *ring_control = priv->ring_control; 277 struct p54p_ring_control *ring_control = priv->ring_control;
268 278
279 p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt,
280 ARRAY_SIZE(ring_control->tx_mgmt),
281 priv->tx_buf_mgmt);
282
283 p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data,
284 ARRAY_SIZE(ring_control->tx_data),
285 priv->tx_buf_data);
286
269 p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt, 287 p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt,
270 ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt); 288 ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt);
271 289
@@ -280,59 +298,49 @@ static irqreturn_t p54p_interrupt(int irq, void *dev_id)
280{ 298{
281 struct ieee80211_hw *dev = dev_id; 299 struct ieee80211_hw *dev = dev_id;
282 struct p54p_priv *priv = dev->priv; 300 struct p54p_priv *priv = dev->priv;
283 struct p54p_ring_control *ring_control = priv->ring_control;
284 __le32 reg; 301 __le32 reg;
285 302
286 spin_lock(&priv->lock);
287 reg = P54P_READ(int_ident); 303 reg = P54P_READ(int_ident);
288 if (unlikely(reg == cpu_to_le32(0xFFFFFFFF))) { 304 if (unlikely(reg == cpu_to_le32(0xFFFFFFFF))) {
289 spin_unlock(&priv->lock); 305 goto out;
290 return IRQ_HANDLED;
291 } 306 }
292
293 P54P_WRITE(int_ack, reg); 307 P54P_WRITE(int_ack, reg);
294 308
295 reg &= P54P_READ(int_enable); 309 reg &= P54P_READ(int_enable);
296 310
297 if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)) { 311 if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE))
298 p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 312 tasklet_schedule(&priv->tasklet);
299 3, ring_control->tx_mgmt, 313 else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))
300 ARRAY_SIZE(ring_control->tx_mgmt),
301 priv->tx_buf_mgmt);
302
303 p54p_check_tx_ring(dev, &priv->tx_idx_data,
304 1, ring_control->tx_data,
305 ARRAY_SIZE(ring_control->tx_data),
306 priv->tx_buf_data);
307
308 tasklet_schedule(&priv->rx_tasklet);
309
310 } else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))
311 complete(&priv->boot_comp); 314 complete(&priv->boot_comp);
312 315
313 spin_unlock(&priv->lock); 316out:
314
315 return reg ? IRQ_HANDLED : IRQ_NONE; 317 return reg ? IRQ_HANDLED : IRQ_NONE;
316} 318}
317 319
318static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 320static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
319{ 321{
322 unsigned long flags;
320 struct p54p_priv *priv = dev->priv; 323 struct p54p_priv *priv = dev->priv;
321 struct p54p_ring_control *ring_control = priv->ring_control; 324 struct p54p_ring_control *ring_control = priv->ring_control;
322 unsigned long flags;
323 struct p54p_desc *desc; 325 struct p54p_desc *desc;
324 dma_addr_t mapping; 326 dma_addr_t mapping;
325 u32 device_idx, idx, i; 327 u32 device_idx, idx, i;
326 328
327 spin_lock_irqsave(&priv->lock, flags); 329 spin_lock_irqsave(&priv->lock, flags);
328
329 device_idx = le32_to_cpu(ring_control->device_idx[1]); 330 device_idx = le32_to_cpu(ring_control->device_idx[1]);
330 idx = le32_to_cpu(ring_control->host_idx[1]); 331 idx = le32_to_cpu(ring_control->host_idx[1]);
331 i = idx % ARRAY_SIZE(ring_control->tx_data); 332 i = idx % ARRAY_SIZE(ring_control->tx_data);
332 333
333 priv->tx_buf_data[i] = skb;
334 mapping = pci_map_single(priv->pdev, skb->data, skb->len, 334 mapping = pci_map_single(priv->pdev, skb->data, skb->len,
335 PCI_DMA_TODEVICE); 335 PCI_DMA_TODEVICE);
336 if (pci_dma_mapping_error(priv->pdev, mapping)) {
337 spin_unlock_irqrestore(&priv->lock, flags);
338 p54_free_skb(dev, skb);
339 dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
340 return ;
341 }
342 priv->tx_buf_data[i] = skb;
343
336 desc = &ring_control->tx_data[i]; 344 desc = &ring_control->tx_data[i];
337 desc->host_addr = cpu_to_le32(mapping); 345 desc->host_addr = cpu_to_le32(mapping);
338 desc->device_addr = ((struct p54_hdr *)skb->data)->req_id; 346 desc->device_addr = ((struct p54_hdr *)skb->data)->req_id;
@@ -354,14 +362,14 @@ static void p54p_stop(struct ieee80211_hw *dev)
354 unsigned int i; 362 unsigned int i;
355 struct p54p_desc *desc; 363 struct p54p_desc *desc;
356 364
357 tasklet_kill(&priv->rx_tasklet);
358
359 P54P_WRITE(int_enable, cpu_to_le32(0)); 365 P54P_WRITE(int_enable, cpu_to_le32(0));
360 P54P_READ(int_enable); 366 P54P_READ(int_enable);
361 udelay(10); 367 udelay(10);
362 368
363 free_irq(priv->pdev->irq, dev); 369 free_irq(priv->pdev->irq, dev);
364 370
371 tasklet_kill(&priv->tasklet);
372
365 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET)); 373 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
366 374
367 for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) { 375 for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) {
@@ -545,7 +553,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
545 priv->common.tx = p54p_tx; 553 priv->common.tx = p54p_tx;
546 554
547 spin_lock_init(&priv->lock); 555 spin_lock_init(&priv->lock);
548 tasklet_init(&priv->rx_tasklet, p54p_rx_tasklet, (unsigned long)dev); 556 tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);
549 557
550 err = request_firmware(&priv->firmware, "isl3886pci", 558 err = request_firmware(&priv->firmware, "isl3886pci",
551 &priv->pdev->dev); 559 &priv->pdev->dev);
diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h
index fbb683953fb2..2feead617a3b 100644
--- a/drivers/net/wireless/p54/p54pci.h
+++ b/drivers/net/wireless/p54/p54pci.h
@@ -92,7 +92,7 @@ struct p54p_priv {
92 struct p54_common common; 92 struct p54_common common;
93 struct pci_dev *pdev; 93 struct pci_dev *pdev;
94 struct p54p_csr __iomem *map; 94 struct p54p_csr __iomem *map;
95 struct tasklet_struct rx_tasklet; 95 struct tasklet_struct tasklet;
96 const struct firmware *firmware; 96 const struct firmware *firmware;
97 spinlock_t lock; 97 spinlock_t lock;
98 struct p54p_ring_control *ring_control; 98 struct p54p_ring_control *ring_control;
@@ -101,8 +101,8 @@ struct p54p_priv {
101 u32 rx_idx_mgmt, tx_idx_mgmt; 101 u32 rx_idx_mgmt, tx_idx_mgmt;
102 struct sk_buff *rx_buf_data[8]; 102 struct sk_buff *rx_buf_data[8];
103 struct sk_buff *rx_buf_mgmt[4]; 103 struct sk_buff *rx_buf_mgmt[4];
104 void *tx_buf_data[32]; 104 struct sk_buff *tx_buf_data[32];
105 void *tx_buf_mgmt[4]; 105 struct sk_buff *tx_buf_mgmt[4];
106 struct completion boot_comp; 106 struct completion boot_comp;
107}; 107};
108 108
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 92af9b96bb7a..b3c4fbd80d8d 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -36,6 +36,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
36 /* Version 1 devices (pci chip + net2280) */ 36 /* Version 1 devices (pci chip + net2280) */
37 {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */ 37 {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */
38 {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */ 38 {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */
39 {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */
39 {USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */ 40 {USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */
40 {USB_DEVICE(0x083a, 0x4502)}, /* Siemens Gigaset USB Adapter */ 41 {USB_DEVICE(0x083a, 0x4502)}, /* Siemens Gigaset USB Adapter */
41 {USB_DEVICE(0x083a, 0x5501)}, /* Phillips CPWUA054 */ 42 {USB_DEVICE(0x083a, 0x5501)}, /* Phillips CPWUA054 */
@@ -60,6 +61,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
60 {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ 61 {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
61 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ 62 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
62 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ 63 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
64 {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */
63 {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */ 65 {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */
64 {USB_DEVICE(0x0915, 0x2000)}, /* Cohiba Proto board */ 66 {USB_DEVICE(0x0915, 0x2000)}, /* Cohiba Proto board */
65 {USB_DEVICE(0x0915, 0x2002)}, /* Cohiba Proto board */ 67 {USB_DEVICE(0x0915, 0x2002)}, /* Cohiba Proto board */
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index b6dda2b27fb5..66057999a93c 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -183,10 +183,10 @@ static int p54_tx_qos_accounting_alloc(struct p54_common *priv,
183 struct sk_buff *skb, 183 struct sk_buff *skb,
184 const u16 p54_queue) 184 const u16 p54_queue)
185{ 185{
186 struct ieee80211_tx_queue_stats *queue; 186 struct p54_tx_queue_stats *queue;
187 unsigned long flags; 187 unsigned long flags;
188 188
189 if (WARN_ON(p54_queue > P54_QUEUE_NUM)) 189 if (WARN_ON(p54_queue >= P54_QUEUE_NUM))
190 return -EINVAL; 190 return -EINVAL;
191 191
192 queue = &priv->tx_stats[p54_queue]; 192 queue = &priv->tx_stats[p54_queue];
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index e4f2bb7368f2..dc14420a9adc 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -39,7 +39,7 @@ module_param(init_pcitm, int, 0);
39 * driver_data 39 * driver_data
40 * If you have an update for this please contact prism54-devel@prism54.org 40 * If you have an update for this please contact prism54-devel@prism54.org
41 * The latest list can be found at http://prism54.org/supported_cards.php */ 41 * The latest list can be found at http://prism54.org/supported_cards.php */
42static const struct pci_device_id prism54_id_tbl[] = { 42static DEFINE_PCI_DEVICE_TABLE(prism54_id_tbl) = {
43 /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */ 43 /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
44 { 44 {
45 0x1260, 0x3890, 45 0x1260, 0x3890,
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 88e1e4e32b22..84c530aa52f9 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -1871,10 +1871,8 @@ static void ray_update_parm(struct net_device *dev, UCHAR objid, UCHAR *value,
1871/*===========================================================================*/ 1871/*===========================================================================*/
1872static void ray_update_multi_list(struct net_device *dev, int all) 1872static void ray_update_multi_list(struct net_device *dev, int all)
1873{ 1873{
1874 struct dev_mc_list *dmi, **dmip;
1875 int ccsindex; 1874 int ccsindex;
1876 struct ccs __iomem *pccs; 1875 struct ccs __iomem *pccs;
1877 int i = 0;
1878 ray_dev_t *local = netdev_priv(dev); 1876 ray_dev_t *local = netdev_priv(dev);
1879 struct pcmcia_device *link = local->finder; 1877 struct pcmcia_device *link = local->finder;
1880 void __iomem *p = local->sram + HOST_TO_ECF_BASE; 1878 void __iomem *p = local->sram + HOST_TO_ECF_BASE;
@@ -1895,9 +1893,11 @@ static void ray_update_multi_list(struct net_device *dev, int all)
1895 writeb(0xff, &pccs->var); 1893 writeb(0xff, &pccs->var);
1896 local->num_multi = 0xff; 1894 local->num_multi = 0xff;
1897 } else { 1895 } else {
1896 struct dev_mc_list *dmi;
1897 int i = 0;
1898
1898 /* Copy the kernel's list of MC addresses to card */ 1899 /* Copy the kernel's list of MC addresses to card */
1899 for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; 1900 netdev_for_each_mc_addr(dmi, dev) {
1900 dmip = &dmi->next) {
1901 memcpy_toio(p, dmi->dmi_addr, ETH_ALEN); 1901 memcpy_toio(p, dmi->dmi_addr, ETH_ALEN);
1902 dev_dbg(&link->dev, 1902 dev_dbg(&link->dev,
1903 "ray_update_multi add addr %02x%02x%02x%02x%02x%02x\n", 1903 "ray_update_multi add addr %02x%02x%02x%02x%02x%02x\n",
@@ -1950,7 +1950,7 @@ static void set_multicast_list(struct net_device *dev)
1950 if (dev->flags & IFF_ALLMULTI) 1950 if (dev->flags & IFF_ALLMULTI)
1951 ray_update_multi_list(dev, 1); 1951 ray_update_multi_list(dev, 1);
1952 else { 1952 else {
1953 if (local->num_multi != dev->mc_count) 1953 if (local->num_multi != netdev_mc_count(dev))
1954 ray_update_multi_list(dev, 0); 1954 ray_update_multi_list(dev, 0);
1955 } 1955 }
1956} /* end set_multicast_list */ 1956} /* end set_multicast_list */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 2ecbedb26e15..9f6d6bf06b8e 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -728,9 +728,9 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
728 ret = rndis_command(dev, u.header, buflen); 728 ret = rndis_command(dev, u.header, buflen);
729 priv->current_command_oid = 0; 729 priv->current_command_oid = 0;
730 if (ret < 0) 730 if (ret < 0)
731 devdbg(dev, "rndis_query_oid(%s): rndis_command() failed, %d " 731 netdev_dbg(dev->net, "%s(%s): rndis_command() failed, %d (%08x)\n",
732 "(%08x)", oid_to_string(oid), ret, 732 __func__, oid_to_string(oid), ret,
733 le32_to_cpu(u.get_c->status)); 733 le32_to_cpu(u.get_c->status));
734 734
735 if (ret == 0) { 735 if (ret == 0) {
736 memcpy(data, u.buf + le32_to_cpu(u.get_c->offset) + 8, *len); 736 memcpy(data, u.buf + le32_to_cpu(u.get_c->offset) + 8, *len);
@@ -741,9 +741,9 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
741 741
742 ret = rndis_error_status(u.get_c->status); 742 ret = rndis_error_status(u.get_c->status);
743 if (ret < 0) 743 if (ret < 0)
744 devdbg(dev, "rndis_query_oid(%s): device returned " 744 netdev_dbg(dev->net, "%s(%s): device returned error, 0x%08x (%d)\n",
745 "error, 0x%08x (%d)", oid_to_string(oid), 745 __func__, oid_to_string(oid),
746 le32_to_cpu(u.get_c->status), ret); 746 le32_to_cpu(u.get_c->status), ret);
747 } 747 }
748 748
749 mutex_unlock(&priv->command_lock); 749 mutex_unlock(&priv->command_lock);
@@ -791,17 +791,17 @@ static int rndis_set_oid(struct usbnet *dev, __le32 oid, void *data, int len)
791 ret = rndis_command(dev, u.header, buflen); 791 ret = rndis_command(dev, u.header, buflen);
792 priv->current_command_oid = 0; 792 priv->current_command_oid = 0;
793 if (ret < 0) 793 if (ret < 0)
794 devdbg(dev, "rndis_set_oid(%s): rndis_command() failed, %d " 794 netdev_dbg(dev->net, "%s(%s): rndis_command() failed, %d (%08x)\n",
795 "(%08x)", oid_to_string(oid), ret, 795 __func__, oid_to_string(oid), ret,
796 le32_to_cpu(u.set_c->status)); 796 le32_to_cpu(u.set_c->status));
797 797
798 if (ret == 0) { 798 if (ret == 0) {
799 ret = rndis_error_status(u.set_c->status); 799 ret = rndis_error_status(u.set_c->status);
800 800
801 if (ret < 0) 801 if (ret < 0)
802 devdbg(dev, "rndis_set_oid(%s): device returned error, " 802 netdev_dbg(dev->net, "%s(%s): device returned error, 0x%08x (%d)\n",
803 "0x%08x (%d)", oid_to_string(oid), 803 __func__, oid_to_string(oid),
804 le32_to_cpu(u.set_c->status), ret); 804 le32_to_cpu(u.set_c->status), ret);
805 } 805 }
806 806
807 mutex_unlock(&priv->command_lock); 807 mutex_unlock(&priv->command_lock);
@@ -870,11 +870,11 @@ static int rndis_set_config_parameter(struct usbnet *dev, char *param,
870#endif 870#endif
871 871
872 if (value_type == 2) 872 if (value_type == 2)
873 devdbg(dev, "setting config parameter: %s, value: %s", 873 netdev_dbg(dev->net, "setting config parameter: %s, value: %s\n",
874 param, (u8 *)value); 874 param, (u8 *)value);
875 else 875 else
876 devdbg(dev, "setting config parameter: %s, value: %d", 876 netdev_dbg(dev->net, "setting config parameter: %s, value: %d\n",
877 param, *(u32 *)value); 877 param, *(u32 *)value);
878 878
879 infobuf->name_offs = cpu_to_le32(sizeof(*infobuf)); 879 infobuf->name_offs = cpu_to_le32(sizeof(*infobuf));
880 infobuf->name_length = cpu_to_le32(param_len); 880 infobuf->name_length = cpu_to_le32(param_len);
@@ -897,20 +897,21 @@ static int rndis_set_config_parameter(struct usbnet *dev, char *param,
897 } 897 }
898 898
899#ifdef DEBUG 899#ifdef DEBUG
900 devdbg(dev, "info buffer (len: %d):", info_len); 900 netdev_dbg(dev->net, "info buffer (len: %d)\n", info_len);
901 for (i = 0; i < info_len; i += 12) { 901 for (i = 0; i < info_len; i += 12) {
902 u32 *tmp = (u32 *)((u8 *)infobuf + i); 902 u32 *tmp = (u32 *)((u8 *)infobuf + i);
903 devdbg(dev, "%08X:%08X:%08X", 903 netdev_dbg(dev->net, "%08X:%08X:%08X\n",
904 cpu_to_be32(tmp[0]), 904 cpu_to_be32(tmp[0]),
905 cpu_to_be32(tmp[1]), 905 cpu_to_be32(tmp[1]),
906 cpu_to_be32(tmp[2])); 906 cpu_to_be32(tmp[2]));
907 } 907 }
908#endif 908#endif
909 909
910 ret = rndis_set_oid(dev, OID_GEN_RNDIS_CONFIG_PARAMETER, 910 ret = rndis_set_oid(dev, OID_GEN_RNDIS_CONFIG_PARAMETER,
911 infobuf, info_len); 911 infobuf, info_len);
912 if (ret != 0) 912 if (ret != 0)
913 devdbg(dev, "setting rndis config parameter failed, %d.", ret); 913 netdev_dbg(dev->net, "setting rndis config parameter failed, %d\n",
914 ret);
914 915
915 kfree(infobuf); 916 kfree(infobuf);
916 return ret; 917 return ret;
@@ -945,13 +946,13 @@ static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid)
945 946
946 ret = rndis_set_oid(usbdev, OID_802_11_SSID, ssid, sizeof(*ssid)); 947 ret = rndis_set_oid(usbdev, OID_802_11_SSID, ssid, sizeof(*ssid));
947 if (ret < 0) { 948 if (ret < 0) {
948 devwarn(usbdev, "setting SSID failed (%08X)", ret); 949 netdev_warn(usbdev->net, "setting SSID failed (%08X)\n", ret);
949 return ret; 950 return ret;
950 } 951 }
951 if (ret == 0) { 952 if (ret == 0) {
952 memcpy(&priv->essid, ssid, sizeof(priv->essid)); 953 memcpy(&priv->essid, ssid, sizeof(priv->essid));
953 priv->radio_on = true; 954 priv->radio_on = true;
954 devdbg(usbdev, "set_essid: radio_on = true"); 955 netdev_dbg(usbdev->net, "%s(): radio_on = true\n", __func__);
955 } 956 }
956 957
957 return ret; 958 return ret;
@@ -963,7 +964,8 @@ static int set_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN])
963 964
964 ret = rndis_set_oid(usbdev, OID_802_11_BSSID, bssid, ETH_ALEN); 965 ret = rndis_set_oid(usbdev, OID_802_11_BSSID, bssid, ETH_ALEN);
965 if (ret < 0) { 966 if (ret < 0) {
966 devwarn(usbdev, "setting BSSID[%pM] failed (%08X)", bssid, ret); 967 netdev_warn(usbdev->net, "setting BSSID[%pM] failed (%08X)\n",
968 bssid, ret);
967 return ret; 969 return ret;
968 } 970 }
969 971
@@ -1021,7 +1023,8 @@ static int disassociate(struct usbnet *usbdev, bool reset_ssid)
1021 ret = rndis_set_oid(usbdev, OID_802_11_DISASSOCIATE, NULL, 0); 1023 ret = rndis_set_oid(usbdev, OID_802_11_DISASSOCIATE, NULL, 0);
1022 if (ret == 0) { 1024 if (ret == 0) {
1023 priv->radio_on = false; 1025 priv->radio_on = false;
1024 devdbg(usbdev, "disassociate: radio_on = false"); 1026 netdev_dbg(usbdev->net, "%s(): radio_on = false\n",
1027 __func__);
1025 1028
1026 if (reset_ssid) 1029 if (reset_ssid)
1027 msleep(100); 1030 msleep(100);
@@ -1054,8 +1057,8 @@ static int set_auth_mode(struct usbnet *usbdev, u32 wpa_version,
1054 __le32 tmp; 1057 __le32 tmp;
1055 int auth_mode, ret; 1058 int auth_mode, ret;
1056 1059
1057 devdbg(usbdev, "set_auth_mode: wpa_version=0x%x authalg=0x%x " 1060 netdev_dbg(usbdev->net, "%s(): wpa_version=0x%x authalg=0x%x keymgmt=0x%x\n",
1058 "keymgmt=0x%x", wpa_version, auth_type, keymgmt); 1061 __func__, wpa_version, auth_type, keymgmt);
1059 1062
1060 if (wpa_version & NL80211_WPA_VERSION_2) { 1063 if (wpa_version & NL80211_WPA_VERSION_2) {
1061 if (keymgmt & RNDIS_WLAN_KEY_MGMT_802_1X) 1064 if (keymgmt & RNDIS_WLAN_KEY_MGMT_802_1X)
@@ -1082,7 +1085,8 @@ static int set_auth_mode(struct usbnet *usbdev, u32 wpa_version,
1082 ret = rndis_set_oid(usbdev, OID_802_11_AUTHENTICATION_MODE, &tmp, 1085 ret = rndis_set_oid(usbdev, OID_802_11_AUTHENTICATION_MODE, &tmp,
1083 sizeof(tmp)); 1086 sizeof(tmp));
1084 if (ret != 0) { 1087 if (ret != 0) {
1085 devwarn(usbdev, "setting auth mode failed (%08X)", ret); 1088 netdev_warn(usbdev->net, "setting auth mode failed (%08X)\n",
1089 ret);
1086 return ret; 1090 return ret;
1087 } 1091 }
1088 1092
@@ -1098,7 +1102,8 @@ static int set_priv_filter(struct usbnet *usbdev)
1098 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 1102 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1099 __le32 tmp; 1103 __le32 tmp;
1100 1104
1101 devdbg(usbdev, "set_priv_filter: wpa_version=0x%x", priv->wpa_version); 1105 netdev_dbg(usbdev->net, "%s(): wpa_version=0x%x\n",
1106 __func__, priv->wpa_version);
1102 1107
1103 if (priv->wpa_version & NL80211_WPA_VERSION_2 || 1108 if (priv->wpa_version & NL80211_WPA_VERSION_2 ||
1104 priv->wpa_version & NL80211_WPA_VERSION_1) 1109 priv->wpa_version & NL80211_WPA_VERSION_1)
@@ -1116,8 +1121,8 @@ static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
1116 __le32 tmp; 1121 __le32 tmp;
1117 int encr_mode, ret; 1122 int encr_mode, ret;
1118 1123
1119 devdbg(usbdev, "set_encr_mode: cipher_pair=0x%x cipher_group=0x%x", 1124 netdev_dbg(usbdev->net, "%s(): cipher_pair=0x%x cipher_group=0x%x\n",
1120 pairwise, groupwise); 1125 __func__, pairwise, groupwise);
1121 1126
1122 if (pairwise & RNDIS_WLAN_ALG_CCMP) 1127 if (pairwise & RNDIS_WLAN_ALG_CCMP)
1123 encr_mode = NDIS_80211_ENCR_CCMP_ENABLED; 1128 encr_mode = NDIS_80211_ENCR_CCMP_ENABLED;
@@ -1136,7 +1141,8 @@ static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
1136 ret = rndis_set_oid(usbdev, OID_802_11_ENCRYPTION_STATUS, &tmp, 1141 ret = rndis_set_oid(usbdev, OID_802_11_ENCRYPTION_STATUS, &tmp,
1137 sizeof(tmp)); 1142 sizeof(tmp));
1138 if (ret != 0) { 1143 if (ret != 0) {
1139 devwarn(usbdev, "setting encr mode failed (%08X)", ret); 1144 netdev_warn(usbdev->net, "setting encr mode failed (%08X)\n",
1145 ret);
1140 return ret; 1146 return ret;
1141 } 1147 }
1142 1148
@@ -1151,13 +1157,15 @@ static int set_infra_mode(struct usbnet *usbdev, int mode)
1151 __le32 tmp; 1157 __le32 tmp;
1152 int ret; 1158 int ret;
1153 1159
1154 devdbg(usbdev, "set_infra_mode: infra_mode=0x%x", priv->infra_mode); 1160 netdev_dbg(usbdev->net, "%s(): infra_mode=0x%x\n",
1161 __func__, priv->infra_mode);
1155 1162
1156 tmp = cpu_to_le32(mode); 1163 tmp = cpu_to_le32(mode);
1157 ret = rndis_set_oid(usbdev, OID_802_11_INFRASTRUCTURE_MODE, &tmp, 1164 ret = rndis_set_oid(usbdev, OID_802_11_INFRASTRUCTURE_MODE, &tmp,
1158 sizeof(tmp)); 1165 sizeof(tmp));
1159 if (ret != 0) { 1166 if (ret != 0) {
1160 devwarn(usbdev, "setting infra mode failed (%08X)", ret); 1167 netdev_warn(usbdev->net, "setting infra mode failed (%08X)\n",
1168 ret);
1161 return ret; 1169 return ret;
1162 } 1170 }
1163 1171
@@ -1174,7 +1182,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
1174{ 1182{
1175 __le32 tmp; 1183 __le32 tmp;
1176 1184
1177 devdbg(usbdev, "set_rts_threshold %i", rts_threshold); 1185 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
1178 1186
1179 if (rts_threshold < 0 || rts_threshold > 2347) 1187 if (rts_threshold < 0 || rts_threshold > 2347)
1180 rts_threshold = 2347; 1188 rts_threshold = 2347;
@@ -1188,7 +1196,7 @@ static int set_frag_threshold(struct usbnet *usbdev, u32 frag_threshold)
1188{ 1196{
1189 __le32 tmp; 1197 __le32 tmp;
1190 1198
1191 devdbg(usbdev, "set_frag_threshold %i", frag_threshold); 1199 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, frag_threshold);
1192 1200
1193 if (frag_threshold < 256 || frag_threshold > 2346) 1201 if (frag_threshold < 256 || frag_threshold > 2346)
1194 frag_threshold = 2346; 1202 frag_threshold = 2346;
@@ -1222,7 +1230,7 @@ static int set_channel(struct usbnet *usbdev, int channel)
1222 unsigned int dsconfig; 1230 unsigned int dsconfig;
1223 int len, ret; 1231 int len, ret;
1224 1232
1225 devdbg(usbdev, "set_channel(%d)", channel); 1233 netdev_dbg(usbdev->net, "%s(%d)\n", __func__, channel);
1226 1234
1227 /* this OID is valid only when not associated */ 1235 /* this OID is valid only when not associated */
1228 if (is_associated(usbdev)) 1236 if (is_associated(usbdev))
@@ -1233,7 +1241,8 @@ static int set_channel(struct usbnet *usbdev, int channel)
1233 len = sizeof(config); 1241 len = sizeof(config);
1234 ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len); 1242 ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
1235 if (ret < 0) { 1243 if (ret < 0) {
1236 devdbg(usbdev, "set_channel: querying configuration failed"); 1244 netdev_dbg(usbdev->net, "%s(): querying configuration failed\n",
1245 __func__);
1237 return ret; 1246 return ret;
1238 } 1247 }
1239 1248
@@ -1241,7 +1250,7 @@ static int set_channel(struct usbnet *usbdev, int channel)
1241 ret = rndis_set_oid(usbdev, OID_802_11_CONFIGURATION, &config, 1250 ret = rndis_set_oid(usbdev, OID_802_11_CONFIGURATION, &config,
1242 sizeof(config)); 1251 sizeof(config));
1243 1252
1244 devdbg(usbdev, "set_channel: %d -> %d", channel, ret); 1253 netdev_dbg(usbdev->net, "%s(): %d -> %d\n", __func__, channel, ret);
1245 1254
1246 return ret; 1255 return ret;
1247} 1256}
@@ -1255,7 +1264,8 @@ static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
1255 u32 cipher; 1264 u32 cipher;
1256 int ret; 1265 int ret;
1257 1266
1258 devdbg(usbdev, "add_wep_key(idx: %d, len: %d)", index, key_len); 1267 netdev_dbg(usbdev->net, "%s(idx: %d, len: %d)\n",
1268 __func__, index, key_len);
1259 1269
1260 if ((key_len != 5 && key_len != 13) || index < 0 || index > 3) 1270 if ((key_len != 5 && key_len != 13) || index < 0 || index > 3)
1261 return -EINVAL; 1271 return -EINVAL;
@@ -1277,15 +1287,15 @@ static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
1277 ret = set_encr_mode(usbdev, RNDIS_WLAN_ALG_WEP, 1287 ret = set_encr_mode(usbdev, RNDIS_WLAN_ALG_WEP,
1278 RNDIS_WLAN_ALG_NONE); 1288 RNDIS_WLAN_ALG_NONE);
1279 if (ret) 1289 if (ret)
1280 devwarn(usbdev, "encryption couldn't be enabled (%08X)", 1290 netdev_warn(usbdev->net, "encryption couldn't be enabled (%08X)\n",
1281 ret); 1291 ret);
1282 } 1292 }
1283 1293
1284 ret = rndis_set_oid(usbdev, OID_802_11_ADD_WEP, &ndis_key, 1294 ret = rndis_set_oid(usbdev, OID_802_11_ADD_WEP, &ndis_key,
1285 sizeof(ndis_key)); 1295 sizeof(ndis_key));
1286 if (ret != 0) { 1296 if (ret != 0) {
1287 devwarn(usbdev, "adding encryption key %d failed (%08X)", 1297 netdev_warn(usbdev->net, "adding encryption key %d failed (%08X)\n",
1288 index+1, ret); 1298 index + 1, ret);
1289 return ret; 1299 return ret;
1290 } 1300 }
1291 1301
@@ -1307,22 +1317,23 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
1307 int ret; 1317 int ret;
1308 1318
1309 if (index < 0 || index >= 4) { 1319 if (index < 0 || index >= 4) {
1310 devdbg(usbdev, "add_wpa_key: index out of range (%i)", index); 1320 netdev_dbg(usbdev->net, "%s(): index out of range (%i)\n",
1321 __func__, index);
1311 return -EINVAL; 1322 return -EINVAL;
1312 } 1323 }
1313 if (key_len > sizeof(ndis_key.material) || key_len < 0) { 1324 if (key_len > sizeof(ndis_key.material) || key_len < 0) {
1314 devdbg(usbdev, "add_wpa_key: key length out of range (%i)", 1325 netdev_dbg(usbdev->net, "%s(): key length out of range (%i)\n",
1315 key_len); 1326 __func__, key_len);
1316 return -EINVAL; 1327 return -EINVAL;
1317 } 1328 }
1318 if (flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ) { 1329 if (flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ) {
1319 if (!rx_seq || seq_len <= 0) { 1330 if (!rx_seq || seq_len <= 0) {
1320 devdbg(usbdev, "add_wpa_key: recv seq flag without" 1331 netdev_dbg(usbdev->net, "%s(): recv seq flag without buffer\n",
1321 "buffer"); 1332 __func__);
1322 return -EINVAL; 1333 return -EINVAL;
1323 } 1334 }
1324 if (rx_seq && seq_len > sizeof(ndis_key.rsc)) { 1335 if (rx_seq && seq_len > sizeof(ndis_key.rsc)) {
1325 devdbg(usbdev, "add_wpa_key: too big recv seq buffer"); 1336 netdev_dbg(usbdev->net, "%s(): too big recv seq buffer\n", __func__);
1326 return -EINVAL; 1337 return -EINVAL;
1327 } 1338 }
1328 } 1339 }
@@ -1330,15 +1341,16 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
1330 is_addr_ok = addr && !is_zero_ether_addr(addr) && 1341 is_addr_ok = addr && !is_zero_ether_addr(addr) &&
1331 !is_broadcast_ether_addr(addr); 1342 !is_broadcast_ether_addr(addr);
1332 if ((flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) && !is_addr_ok) { 1343 if ((flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) && !is_addr_ok) {
1333 devdbg(usbdev, "add_wpa_key: pairwise but bssid invalid (%pM)", 1344 netdev_dbg(usbdev->net, "%s(): pairwise but bssid invalid (%pM)\n",
1334 addr); 1345 __func__, addr);
1335 return -EINVAL; 1346 return -EINVAL;
1336 } 1347 }
1337 1348
1338 devdbg(usbdev, "add_wpa_key(%i): flags:%i%i%i", index, 1349 netdev_dbg(usbdev->net, "%s(%i): flags:%i%i%i\n",
1339 !!(flags & NDIS_80211_ADDKEY_TRANSMIT_KEY), 1350 __func__, index,
1340 !!(flags & NDIS_80211_ADDKEY_PAIRWISE_KEY), 1351 !!(flags & NDIS_80211_ADDKEY_TRANSMIT_KEY),
1341 !!(flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ)); 1352 !!(flags & NDIS_80211_ADDKEY_PAIRWISE_KEY),
1353 !!(flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ));
1342 1354
1343 memset(&ndis_key, 0, sizeof(ndis_key)); 1355 memset(&ndis_key, 0, sizeof(ndis_key));
1344 1356
@@ -1372,7 +1384,8 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
1372 1384
1373 ret = rndis_set_oid(usbdev, OID_802_11_ADD_KEY, &ndis_key, 1385 ret = rndis_set_oid(usbdev, OID_802_11_ADD_KEY, &ndis_key,
1374 le32_to_cpu(ndis_key.size)); 1386 le32_to_cpu(ndis_key.size));
1375 devdbg(usbdev, "add_wpa_key: OID_802_11_ADD_KEY -> %08X", ret); 1387 netdev_dbg(usbdev->net, "%s(): OID_802_11_ADD_KEY -> %08X\n",
1388 __func__, ret);
1376 if (ret != 0) 1389 if (ret != 0)
1377 return ret; 1390 return ret;
1378 1391
@@ -1401,7 +1414,7 @@ static int restore_key(struct usbnet *usbdev, int key_idx)
1401 1414
1402 key = priv->encr_keys[key_idx]; 1415 key = priv->encr_keys[key_idx];
1403 1416
1404 devdbg(usbdev, "restore_key: %i:%i", key_idx, key.len); 1417 netdev_dbg(usbdev->net, "%s(): %i:%i\n", __func__, key_idx, key.len);
1405 1418
1406 if (key.len == 0) 1419 if (key.len == 0)
1407 return 0; 1420 return 0;
@@ -1436,8 +1449,9 @@ static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid)
1436 1449
1437 is_wpa = is_wpa_key(priv, index); 1450 is_wpa = is_wpa_key(priv, index);
1438 1451
1439 devdbg(usbdev, "remove_key: %i:%s:%i", index, is_wpa ? "wpa" : "wep", 1452 netdev_dbg(usbdev->net, "%s(): %i:%s:%i\n",
1440 priv->encr_keys[index].len); 1453 __func__, index, is_wpa ? "wpa" : "wep",
1454 priv->encr_keys[index].len);
1441 1455
1442 clear_key(priv, index); 1456 clear_key(priv, index);
1443 1457
@@ -1464,9 +1478,9 @@ static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid)
1464 ret = rndis_set_oid(usbdev, OID_802_11_REMOVE_WEP, &keyindex, 1478 ret = rndis_set_oid(usbdev, OID_802_11_REMOVE_WEP, &keyindex,
1465 sizeof(keyindex)); 1479 sizeof(keyindex));
1466 if (ret != 0) { 1480 if (ret != 0) {
1467 devwarn(usbdev, 1481 netdev_warn(usbdev->net,
1468 "removing encryption key %d failed (%08X)", 1482 "removing encryption key %d failed (%08X)\n",
1469 index, ret); 1483 index, ret);
1470 return ret; 1484 return ret;
1471 } 1485 }
1472 } 1486 }
@@ -1488,29 +1502,29 @@ static void set_multicast_list(struct usbnet *usbdev)
1488 1502
1489 filter = RNDIS_PACKET_TYPE_DIRECTED | RNDIS_PACKET_TYPE_BROADCAST; 1503 filter = RNDIS_PACKET_TYPE_DIRECTED | RNDIS_PACKET_TYPE_BROADCAST;
1490 1504
1505 netif_addr_lock_bh(usbdev->net);
1491 if (usbdev->net->flags & IFF_PROMISC) { 1506 if (usbdev->net->flags & IFF_PROMISC) {
1492 filter |= RNDIS_PACKET_TYPE_PROMISCUOUS | 1507 filter |= RNDIS_PACKET_TYPE_PROMISCUOUS |
1493 RNDIS_PACKET_TYPE_ALL_LOCAL; 1508 RNDIS_PACKET_TYPE_ALL_LOCAL;
1494 } else if (usbdev->net->flags & IFF_ALLMULTI || 1509 } else if (usbdev->net->flags & IFF_ALLMULTI ||
1495 usbdev->net->mc_count > priv->multicast_size) { 1510 netdev_mc_count(usbdev->net) > priv->multicast_size) {
1496 filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST; 1511 filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST;
1497 } else if (usbdev->net->mc_count > 0) { 1512 } else if (!netdev_mc_empty(usbdev->net)) {
1498 size = min(priv->multicast_size, usbdev->net->mc_count); 1513 size = min(priv->multicast_size, netdev_mc_count(usbdev->net));
1499 buf = kmalloc(size * ETH_ALEN, GFP_KERNEL); 1514 buf = kmalloc(size * ETH_ALEN, GFP_KERNEL);
1500 if (!buf) { 1515 if (!buf) {
1501 devwarn(usbdev, 1516 netdev_warn(usbdev->net,
1502 "couldn't alloc %d bytes of memory", 1517 "couldn't alloc %d bytes of memory\n",
1503 size * ETH_ALEN); 1518 size * ETH_ALEN);
1519 netif_addr_unlock_bh(usbdev->net);
1504 return; 1520 return;
1505 } 1521 }
1506 1522
1507 mclist = usbdev->net->mc_list; 1523 i = 0;
1508 for (i = 0; i < size && mclist; mclist = mclist->next) { 1524 netdev_for_each_mc_addr(mclist, usbdev->net) {
1509 if (mclist->dmi_addrlen != ETH_ALEN) 1525 if (i == size)
1510 continue; 1526 break;
1511 1527 memcpy(buf + i++ * ETH_ALEN, mclist->dmi_addr, ETH_ALEN);
1512 memcpy(buf + i * ETH_ALEN, mclist->dmi_addr, ETH_ALEN);
1513 i++;
1514 } 1528 }
1515 1529
1516 ret = rndis_set_oid(usbdev, OID_802_3_MULTICAST_LIST, buf, 1530 ret = rndis_set_oid(usbdev, OID_802_3_MULTICAST_LIST, buf,
@@ -1520,21 +1534,22 @@ static void set_multicast_list(struct usbnet *usbdev)
1520 else 1534 else
1521 filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST; 1535 filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST;
1522 1536
1523 devdbg(usbdev, "OID_802_3_MULTICAST_LIST(%d, max: %d) -> %d", 1537 netdev_dbg(usbdev->net, "OID_802_3_MULTICAST_LIST(%d, max: %d) -> %d\n",
1524 i, priv->multicast_size, ret); 1538 i, priv->multicast_size, ret);
1525 1539
1526 kfree(buf); 1540 kfree(buf);
1527 } 1541 }
1542 netif_addr_unlock_bh(usbdev->net);
1528 1543
1529 ret = rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &filter, 1544 ret = rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &filter,
1530 sizeof(filter)); 1545 sizeof(filter));
1531 if (ret < 0) { 1546 if (ret < 0) {
1532 devwarn(usbdev, "couldn't set packet filter: %08x", 1547 netdev_warn(usbdev->net, "couldn't set packet filter: %08x\n",
1533 le32_to_cpu(filter)); 1548 le32_to_cpu(filter));
1534 } 1549 }
1535 1550
1536 devdbg(usbdev, "OID_GEN_CURRENT_PACKET_FILTER(%08x) -> %d", 1551 netdev_dbg(usbdev->net, "OID_GEN_CURRENT_PACKET_FILTER(%08x) -> %d\n",
1537 le32_to_cpu(filter), ret); 1552 le32_to_cpu(filter), ret);
1538} 1553}
1539 1554
1540/* 1555/*
@@ -1592,7 +1607,8 @@ static int rndis_set_tx_power(struct wiphy *wiphy, enum tx_power_setting type,
1592 struct rndis_wlan_private *priv = wiphy_priv(wiphy); 1607 struct rndis_wlan_private *priv = wiphy_priv(wiphy);
1593 struct usbnet *usbdev = priv->usbdev; 1608 struct usbnet *usbdev = priv->usbdev;
1594 1609
1595 devdbg(usbdev, "rndis_set_tx_power type:0x%x dbm:%i", type, dbm); 1610 netdev_dbg(usbdev->net, "%s(): type:0x%x dbm:%i\n",
1611 __func__, type, dbm);
1596 1612
1597 /* Device doesn't support changing txpower after initialization, only 1613 /* Device doesn't support changing txpower after initialization, only
1598 * turn off/on radio. Support 'auto' mode and setting same dBm that is 1614 * turn off/on radio. Support 'auto' mode and setting same dBm that is
@@ -1615,7 +1631,7 @@ static int rndis_get_tx_power(struct wiphy *wiphy, int *dbm)
1615 1631
1616 *dbm = get_bcm4320_power_dbm(priv); 1632 *dbm = get_bcm4320_power_dbm(priv);
1617 1633
1618 devdbg(usbdev, "rndis_get_tx_power dbm:%i", *dbm); 1634 netdev_dbg(usbdev->net, "%s(): dbm:%i\n", __func__, *dbm);
1619 1635
1620 return 0; 1636 return 0;
1621} 1637}
@@ -1629,7 +1645,7 @@ static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
1629 int ret; 1645 int ret;
1630 __le32 tmp; 1646 __le32 tmp;
1631 1647
1632 devdbg(usbdev, "cfg80211.scan"); 1648 netdev_dbg(usbdev->net, "cfg80211.scan\n");
1633 1649
1634 /* Get current bssid list from device before new scan, as new scan 1650 /* Get current bssid list from device before new scan, as new scan
1635 * clears internal bssid list. 1651 * clears internal bssid list.
@@ -1669,8 +1685,8 @@ static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev,
1669 int ie_len, bssid_len; 1685 int ie_len, bssid_len;
1670 u8 *ie; 1686 u8 *ie;
1671 1687
1672 devdbg(usbdev, " found bssid: '%.32s' [%pM]", bssid->ssid.essid, 1688 netdev_dbg(usbdev->net, " found bssid: '%.32s' [%pM]\n",
1673 bssid->mac); 1689 bssid->ssid.essid, bssid->mac);
1674 1690
1675 /* parse bssid structure */ 1691 /* parse bssid structure */
1676 bssid_len = le32_to_cpu(bssid->length); 1692 bssid_len = le32_to_cpu(bssid->length);
@@ -1712,7 +1728,7 @@ static int rndis_check_bssid_list(struct usbnet *usbdev)
1712 int ret = -EINVAL, len, count, bssid_len; 1728 int ret = -EINVAL, len, count, bssid_len;
1713 bool resized = false; 1729 bool resized = false;
1714 1730
1715 devdbg(usbdev, "check_bssid_list"); 1731 netdev_dbg(usbdev->net, "check_bssid_list\n");
1716 1732
1717 len = CONTROL_BUFFER_SIZE; 1733 len = CONTROL_BUFFER_SIZE;
1718resize_buf: 1734resize_buf:
@@ -1736,8 +1752,8 @@ resize_buf:
1736 bssid = bssid_list->bssid; 1752 bssid = bssid_list->bssid;
1737 bssid_len = le32_to_cpu(bssid->length); 1753 bssid_len = le32_to_cpu(bssid->length);
1738 count = le32_to_cpu(bssid_list->num_items); 1754 count = le32_to_cpu(bssid_list->num_items);
1739 devdbg(usbdev, "check_bssid_list: %d BSSIDs found (buflen: %d)", count, 1755 netdev_dbg(usbdev->net, "check_bssid_list: %d BSSIDs found (buflen: %d)\n",
1740 len); 1756 count, len);
1741 1757
1742 while (count && ((void *)bssid + bssid_len) <= (buf + len)) { 1758 while (count && ((void *)bssid + bssid_len) <= (buf + len)) {
1743 rndis_bss_info_update(usbdev, bssid); 1759 rndis_bss_info_update(usbdev, bssid);
@@ -1759,7 +1775,7 @@ static void rndis_get_scan_results(struct work_struct *work)
1759 struct usbnet *usbdev = priv->usbdev; 1775 struct usbnet *usbdev = priv->usbdev;
1760 int ret; 1776 int ret;
1761 1777
1762 devdbg(usbdev, "get_scan_results"); 1778 netdev_dbg(usbdev->net, "get_scan_results\n");
1763 1779
1764 if (!priv->scan_request) 1780 if (!priv->scan_request)
1765 return; 1781 return;
@@ -1793,7 +1809,7 @@ static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
1793 1809
1794 if (sme->crypto.n_ciphers_pairwise > 0 && 1810 if (sme->crypto.n_ciphers_pairwise > 0 &&
1795 pairwise == RNDIS_WLAN_ALG_NONE) { 1811 pairwise == RNDIS_WLAN_ALG_NONE) {
1796 deverr(usbdev, "Unsupported pairwise cipher"); 1812 netdev_err(usbdev->net, "Unsupported pairwise cipher\n");
1797 return -ENOTSUPP; 1813 return -ENOTSUPP;
1798 } 1814 }
1799 1815
@@ -1803,28 +1819,30 @@ static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
1803 1819
1804 if (sme->crypto.n_akm_suites > 0 && 1820 if (sme->crypto.n_akm_suites > 0 &&
1805 keymgmt == RNDIS_WLAN_KEY_MGMT_NONE) { 1821 keymgmt == RNDIS_WLAN_KEY_MGMT_NONE) {
1806 deverr(usbdev, "Invalid keymgmt"); 1822 netdev_err(usbdev->net, "Invalid keymgmt\n");
1807 return -ENOTSUPP; 1823 return -ENOTSUPP;
1808 } 1824 }
1809 1825
1810 devdbg(usbdev, "cfg80211.connect('%.32s':[%pM]:%d:[%d,0x%x:0x%x]:[0x%x:" 1826 netdev_dbg(usbdev->net, "cfg80211.connect('%.32s':[%pM]:%d:[%d,0x%x:0x%x]:[0x%x:0x%x]:0x%x)\n",
1811 "0x%x]:0x%x)", sme->ssid, sme->bssid, chan, 1827 sme->ssid, sme->bssid, chan,
1812 sme->privacy, sme->crypto.wpa_versions, sme->auth_type, 1828 sme->privacy, sme->crypto.wpa_versions, sme->auth_type,
1813 groupwise, pairwise, keymgmt); 1829 groupwise, pairwise, keymgmt);
1814 1830
1815 if (is_associated(usbdev)) 1831 if (is_associated(usbdev))
1816 disassociate(usbdev, false); 1832 disassociate(usbdev, false);
1817 1833
1818 ret = set_infra_mode(usbdev, NDIS_80211_INFRA_INFRA); 1834 ret = set_infra_mode(usbdev, NDIS_80211_INFRA_INFRA);
1819 if (ret < 0) { 1835 if (ret < 0) {
1820 devdbg(usbdev, "connect: set_infra_mode failed, %d", ret); 1836 netdev_dbg(usbdev->net, "connect: set_infra_mode failed, %d\n",
1837 ret);
1821 goto err_turn_radio_on; 1838 goto err_turn_radio_on;
1822 } 1839 }
1823 1840
1824 ret = set_auth_mode(usbdev, sme->crypto.wpa_versions, sme->auth_type, 1841 ret = set_auth_mode(usbdev, sme->crypto.wpa_versions, sme->auth_type,
1825 keymgmt); 1842 keymgmt);
1826 if (ret < 0) { 1843 if (ret < 0) {
1827 devdbg(usbdev, "connect: set_auth_mode failed, %d", ret); 1844 netdev_dbg(usbdev->net, "connect: set_auth_mode failed, %d\n",
1845 ret);
1828 goto err_turn_radio_on; 1846 goto err_turn_radio_on;
1829 } 1847 }
1830 1848
@@ -1832,14 +1850,16 @@ static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
1832 1850
1833 ret = set_encr_mode(usbdev, pairwise, groupwise); 1851 ret = set_encr_mode(usbdev, pairwise, groupwise);
1834 if (ret < 0) { 1852 if (ret < 0) {
1835 devdbg(usbdev, "connect: set_encr_mode failed, %d", ret); 1853 netdev_dbg(usbdev->net, "connect: set_encr_mode failed, %d\n",
1854 ret);
1836 goto err_turn_radio_on; 1855 goto err_turn_radio_on;
1837 } 1856 }
1838 1857
1839 if (channel) { 1858 if (channel) {
1840 ret = set_channel(usbdev, chan); 1859 ret = set_channel(usbdev, chan);
1841 if (ret < 0) { 1860 if (ret < 0) {
1842 devdbg(usbdev, "connect: set_channel failed, %d", ret); 1861 netdev_dbg(usbdev->net, "connect: set_channel failed, %d\n",
1862 ret);
1843 goto err_turn_radio_on; 1863 goto err_turn_radio_on;
1844 } 1864 }
1845 } 1865 }
@@ -1848,8 +1868,8 @@ static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
1848 priv->encr_tx_key_index = sme->key_idx; 1868 priv->encr_tx_key_index = sme->key_idx;
1849 ret = add_wep_key(usbdev, sme->key, sme->key_len, sme->key_idx); 1869 ret = add_wep_key(usbdev, sme->key, sme->key_len, sme->key_idx);
1850 if (ret < 0) { 1870 if (ret < 0) {
1851 devdbg(usbdev, "connect: add_wep_key failed, %d " 1871 netdev_dbg(usbdev->net, "connect: add_wep_key failed, %d (%d, %d)\n",
1852 "(%d, %d)", ret, sme->key_len, sme->key_idx); 1872 ret, sme->key_len, sme->key_idx);
1853 goto err_turn_radio_on; 1873 goto err_turn_radio_on;
1854 } 1874 }
1855 } 1875 }
@@ -1858,7 +1878,8 @@ static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
1858 !is_broadcast_ether_addr(sme->bssid)) { 1878 !is_broadcast_ether_addr(sme->bssid)) {
1859 ret = set_bssid(usbdev, sme->bssid); 1879 ret = set_bssid(usbdev, sme->bssid);
1860 if (ret < 0) { 1880 if (ret < 0) {
1861 devdbg(usbdev, "connect: set_bssid failed, %d", ret); 1881 netdev_dbg(usbdev->net, "connect: set_bssid failed, %d\n",
1882 ret);
1862 goto err_turn_radio_on; 1883 goto err_turn_radio_on;
1863 } 1884 }
1864 } else 1885 } else
@@ -1880,7 +1901,7 @@ static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
1880 1901
1881 ret = set_essid(usbdev, &ssid); 1902 ret = set_essid(usbdev, &ssid);
1882 if (ret < 0) 1903 if (ret < 0)
1883 devdbg(usbdev, "connect: set_essid failed, %d", ret); 1904 netdev_dbg(usbdev->net, "connect: set_essid failed, %d\n", ret);
1884 return ret; 1905 return ret;
1885 1906
1886err_turn_radio_on: 1907err_turn_radio_on:
@@ -1895,7 +1916,7 @@ static int rndis_disconnect(struct wiphy *wiphy, struct net_device *dev,
1895 struct rndis_wlan_private *priv = wiphy_priv(wiphy); 1916 struct rndis_wlan_private *priv = wiphy_priv(wiphy);
1896 struct usbnet *usbdev = priv->usbdev; 1917 struct usbnet *usbdev = priv->usbdev;
1897 1918
1898 devdbg(usbdev, "cfg80211.disconnect(%d)", reason_code); 1919 netdev_dbg(usbdev->net, "cfg80211.disconnect(%d)\n", reason_code);
1899 1920
1900 priv->connected = false; 1921 priv->connected = false;
1901 memset(priv->bssid, 0, ETH_ALEN); 1922 memset(priv->bssid, 0, ETH_ALEN);
@@ -1929,21 +1950,23 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
1929 alg = RNDIS_WLAN_ALG_NONE; 1950 alg = RNDIS_WLAN_ALG_NONE;
1930 } 1951 }
1931 1952
1932 devdbg(usbdev, "cfg80211.join_ibss('%.32s':[%pM]:%d:%d)", params->ssid, 1953 netdev_dbg(usbdev->net, "cfg80211.join_ibss('%.32s':[%pM]:%d:%d)\n",
1933 params->bssid, chan, params->privacy); 1954 params->ssid, params->bssid, chan, params->privacy);
1934 1955
1935 if (is_associated(usbdev)) 1956 if (is_associated(usbdev))
1936 disassociate(usbdev, false); 1957 disassociate(usbdev, false);
1937 1958
1938 ret = set_infra_mode(usbdev, NDIS_80211_INFRA_ADHOC); 1959 ret = set_infra_mode(usbdev, NDIS_80211_INFRA_ADHOC);
1939 if (ret < 0) { 1960 if (ret < 0) {
1940 devdbg(usbdev, "join_ibss: set_infra_mode failed, %d", ret); 1961 netdev_dbg(usbdev->net, "join_ibss: set_infra_mode failed, %d\n",
1962 ret);
1941 goto err_turn_radio_on; 1963 goto err_turn_radio_on;
1942 } 1964 }
1943 1965
1944 ret = set_auth_mode(usbdev, 0, auth_type, RNDIS_WLAN_KEY_MGMT_NONE); 1966 ret = set_auth_mode(usbdev, 0, auth_type, RNDIS_WLAN_KEY_MGMT_NONE);
1945 if (ret < 0) { 1967 if (ret < 0) {
1946 devdbg(usbdev, "join_ibss: set_auth_mode failed, %d", ret); 1968 netdev_dbg(usbdev->net, "join_ibss: set_auth_mode failed, %d\n",
1969 ret);
1947 goto err_turn_radio_on; 1970 goto err_turn_radio_on;
1948 } 1971 }
1949 1972
@@ -1951,15 +1974,16 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
1951 1974
1952 ret = set_encr_mode(usbdev, alg, RNDIS_WLAN_ALG_NONE); 1975 ret = set_encr_mode(usbdev, alg, RNDIS_WLAN_ALG_NONE);
1953 if (ret < 0) { 1976 if (ret < 0) {
1954 devdbg(usbdev, "join_ibss: set_encr_mode failed, %d", ret); 1977 netdev_dbg(usbdev->net, "join_ibss: set_encr_mode failed, %d\n",
1978 ret);
1955 goto err_turn_radio_on; 1979 goto err_turn_radio_on;
1956 } 1980 }
1957 1981
1958 if (channel) { 1982 if (channel) {
1959 ret = set_channel(usbdev, chan); 1983 ret = set_channel(usbdev, chan);
1960 if (ret < 0) { 1984 if (ret < 0) {
1961 devdbg(usbdev, "join_ibss: set_channel failed, %d", 1985 netdev_dbg(usbdev->net, "join_ibss: set_channel failed, %d\n",
1962 ret); 1986 ret);
1963 goto err_turn_radio_on; 1987 goto err_turn_radio_on;
1964 } 1988 }
1965 } 1989 }
@@ -1968,7 +1992,8 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
1968 !is_broadcast_ether_addr(params->bssid)) { 1992 !is_broadcast_ether_addr(params->bssid)) {
1969 ret = set_bssid(usbdev, params->bssid); 1993 ret = set_bssid(usbdev, params->bssid);
1970 if (ret < 0) { 1994 if (ret < 0) {
1971 devdbg(usbdev, "join_ibss: set_bssid failed, %d", ret); 1995 netdev_dbg(usbdev->net, "join_ibss: set_bssid failed, %d\n",
1996 ret);
1972 goto err_turn_radio_on; 1997 goto err_turn_radio_on;
1973 } 1998 }
1974 } else 1999 } else
@@ -1988,7 +2013,8 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
1988 2013
1989 ret = set_essid(usbdev, &ssid); 2014 ret = set_essid(usbdev, &ssid);
1990 if (ret < 0) 2015 if (ret < 0)
1991 devdbg(usbdev, "join_ibss: set_essid failed, %d", ret); 2016 netdev_dbg(usbdev->net, "join_ibss: set_essid failed, %d\n",
2017 ret);
1992 return ret; 2018 return ret;
1993 2019
1994err_turn_radio_on: 2020err_turn_radio_on:
@@ -2002,7 +2028,7 @@ static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
2002 struct rndis_wlan_private *priv = wiphy_priv(wiphy); 2028 struct rndis_wlan_private *priv = wiphy_priv(wiphy);
2003 struct usbnet *usbdev = priv->usbdev; 2029 struct usbnet *usbdev = priv->usbdev;
2004 2030
2005 devdbg(usbdev, "cfg80211.leave_ibss()"); 2031 netdev_dbg(usbdev->net, "cfg80211.leave_ibss()\n");
2006 2032
2007 priv->connected = false; 2033 priv->connected = false;
2008 memset(priv->bssid, 0, ETH_ALEN); 2034 memset(priv->bssid, 0, ETH_ALEN);
@@ -2028,8 +2054,8 @@ static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
2028 struct usbnet *usbdev = priv->usbdev; 2054 struct usbnet *usbdev = priv->usbdev;
2029 __le32 flags; 2055 __le32 flags;
2030 2056
2031 devdbg(usbdev, "rndis_add_key(%i, %pM, %08x)", key_index, mac_addr, 2057 netdev_dbg(usbdev->net, "%s(%i, %pM, %08x)\n",
2032 params->cipher); 2058 __func__, key_index, mac_addr, params->cipher);
2033 2059
2034 switch (params->cipher) { 2060 switch (params->cipher) {
2035 case WLAN_CIPHER_SUITE_WEP40: 2061 case WLAN_CIPHER_SUITE_WEP40:
@@ -2050,8 +2076,8 @@ static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
2050 key_index, mac_addr, params->seq, 2076 key_index, mac_addr, params->seq,
2051 params->seq_len, params->cipher, flags); 2077 params->seq_len, params->cipher, flags);
2052 default: 2078 default:
2053 devdbg(usbdev, "rndis_add_key: unsupported cipher %08x", 2079 netdev_dbg(usbdev->net, "%s(): unsupported cipher %08x\n",
2054 params->cipher); 2080 __func__, params->cipher);
2055 return -ENOTSUPP; 2081 return -ENOTSUPP;
2056 } 2082 }
2057} 2083}
@@ -2062,7 +2088,7 @@ static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev,
2062 struct rndis_wlan_private *priv = wiphy_priv(wiphy); 2088 struct rndis_wlan_private *priv = wiphy_priv(wiphy);
2063 struct usbnet *usbdev = priv->usbdev; 2089 struct usbnet *usbdev = priv->usbdev;
2064 2090
2065 devdbg(usbdev, "rndis_del_key(%i, %pM)", key_index, mac_addr); 2091 netdev_dbg(usbdev->net, "%s(%i, %pM)\n", __func__, key_index, mac_addr);
2066 2092
2067 return remove_key(usbdev, key_index, mac_addr); 2093 return remove_key(usbdev, key_index, mac_addr);
2068} 2094}
@@ -2074,7 +2100,7 @@ static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
2074 struct usbnet *usbdev = priv->usbdev; 2100 struct usbnet *usbdev = priv->usbdev;
2075 struct rndis_wlan_encr_key key; 2101 struct rndis_wlan_encr_key key;
2076 2102
2077 devdbg(usbdev, "rndis_set_default_key(%i)", key_index); 2103 netdev_dbg(usbdev->net, "%s(%i)\n", __func__, key_index);
2078 2104
2079 priv->encr_tx_key_index = key_index; 2105 priv->encr_tx_key_index = key_index;
2080 2106
@@ -2188,7 +2214,8 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
2188 if (ret < 0) 2214 if (ret < 0)
2189 memset(bssid, 0, sizeof(bssid)); 2215 memset(bssid, 0, sizeof(bssid));
2190 2216
2191 devdbg(usbdev, "link up work: [%pM] %s", bssid, roamed ? "roamed" : ""); 2217 netdev_dbg(usbdev->net, "link up work: [%pM]%s\n",
2218 bssid, roamed ? " roamed" : "");
2192 2219
2193 /* Internal bss list in device always contains at least the currently 2220 /* Internal bss list in device always contains at least the currently
2194 * connected bss and we can get it to cfg80211 with 2221 * connected bss and we can get it to cfg80211 with
@@ -2270,8 +2297,8 @@ static void rndis_wlan_auth_indication(struct usbnet *usbdev,
2270 /* must have at least one array entry */ 2297 /* must have at least one array entry */
2271 if (len < offsetof(struct ndis_80211_status_indication, u) + 2298 if (len < offsetof(struct ndis_80211_status_indication, u) +
2272 sizeof(struct ndis_80211_auth_request)) { 2299 sizeof(struct ndis_80211_auth_request)) {
2273 devinfo(usbdev, "authentication indication: " 2300 netdev_info(usbdev->net, "authentication indication: too short message (%i)\n",
2274 "too short message (%i)", len); 2301 len);
2275 return; 2302 return;
2276 } 2303 }
2277 2304
@@ -2298,8 +2325,8 @@ static void rndis_wlan_auth_indication(struct usbnet *usbdev,
2298 type = "group_error"; 2325 type = "group_error";
2299 } 2326 }
2300 2327
2301 devinfo(usbdev, "authentication indication: %s (0x%08x)", type, 2328 netdev_info(usbdev->net, "authentication indication: %s (0x%08x)\n",
2302 le32_to_cpu(auth_req->flags)); 2329 type, le32_to_cpu(auth_req->flags));
2303 2330
2304 if (pairwise_error) { 2331 if (pairwise_error) {
2305 key_type = NL80211_KEYTYPE_PAIRWISE; 2332 key_type = NL80211_KEYTYPE_PAIRWISE;
@@ -2335,8 +2362,8 @@ static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev,
2335 2362
2336 if (len < offsetof(struct ndis_80211_status_indication, u) + 2363 if (len < offsetof(struct ndis_80211_status_indication, u) +
2337 sizeof(struct ndis_80211_pmkid_cand_list)) { 2364 sizeof(struct ndis_80211_pmkid_cand_list)) {
2338 devinfo(usbdev, "pmkid candidate list indication: " 2365 netdev_info(usbdev->net, "pmkid candidate list indication: too short message (%i)\n",
2339 "too short message (%i)", len); 2366 len);
2340 return; 2367 return;
2341 } 2368 }
2342 2369
@@ -2346,18 +2373,16 @@ static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev,
2346 offsetof(struct ndis_80211_status_indication, u); 2373 offsetof(struct ndis_80211_status_indication, u);
2347 2374
2348 if (len < expected_len) { 2375 if (len < expected_len) {
2349 devinfo(usbdev, "pmkid candidate list indication: " 2376 netdev_info(usbdev->net, "pmkid candidate list indication: list larger than buffer (%i < %i)\n",
2350 "list larger than buffer (%i < %i)", 2377 len, expected_len);
2351 len, expected_len);
2352 return; 2378 return;
2353 } 2379 }
2354 2380
2355 cand_list = &indication->u.cand_list; 2381 cand_list = &indication->u.cand_list;
2356 2382
2357 devinfo(usbdev, "pmkid candidate list indication: " 2383 netdev_info(usbdev->net, "pmkid candidate list indication: version %i, candidates %i\n",
2358 "version %i, candidates %i", 2384 le32_to_cpu(cand_list->version),
2359 le32_to_cpu(cand_list->version), 2385 le32_to_cpu(cand_list->num_candidates));
2360 le32_to_cpu(cand_list->num_candidates));
2361 2386
2362 if (le32_to_cpu(cand_list->version) != 1) 2387 if (le32_to_cpu(cand_list->version) != 1)
2363 return; 2388 return;
@@ -2366,8 +2391,8 @@ static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev,
2366 struct ndis_80211_pmkid_candidate *cand = 2391 struct ndis_80211_pmkid_candidate *cand =
2367 &cand_list->candidate_list[i]; 2392 &cand_list->candidate_list[i];
2368 2393
2369 devdbg(usbdev, "cand[%i]: flags: 0x%08x, bssid: %pM", 2394 netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, bssid: %pM\n",
2370 i, le32_to_cpu(cand->flags), cand->bssid); 2395 i, le32_to_cpu(cand->flags), cand->bssid);
2371 2396
2372#if 0 2397#if 0
2373 struct iw_pmkid_cand pcand; 2398 struct iw_pmkid_cand pcand;
@@ -2398,15 +2423,14 @@ static void rndis_wlan_media_specific_indication(struct usbnet *usbdev,
2398 len = le32_to_cpu(msg->length); 2423 len = le32_to_cpu(msg->length);
2399 2424
2400 if (len < 8) { 2425 if (len < 8) {
2401 devinfo(usbdev, "media specific indication, " 2426 netdev_info(usbdev->net, "media specific indication, ignore too short message (%i < 8)\n",
2402 "ignore too short message (%i < 8)", len); 2427 len);
2403 return; 2428 return;
2404 } 2429 }
2405 2430
2406 if (offset + len > buflen) { 2431 if (offset + len > buflen) {
2407 devinfo(usbdev, "media specific indication, " 2432 netdev_info(usbdev->net, "media specific indication, too large to fit to buffer (%i > %i)\n",
2408 "too large to fit to buffer (%i > %i)", 2433 offset + len, buflen);
2409 offset + len, buflen);
2410 return; 2434 return;
2411 } 2435 }
2412 2436
@@ -2414,13 +2438,13 @@ static void rndis_wlan_media_specific_indication(struct usbnet *usbdev,
2414 2438
2415 switch (le32_to_cpu(indication->status_type)) { 2439 switch (le32_to_cpu(indication->status_type)) {
2416 case NDIS_80211_STATUSTYPE_RADIOSTATE: 2440 case NDIS_80211_STATUSTYPE_RADIOSTATE:
2417 devinfo(usbdev, "radio state indication: %i", 2441 netdev_info(usbdev->net, "radio state indication: %i\n",
2418 le32_to_cpu(indication->u.radio_status)); 2442 le32_to_cpu(indication->u.radio_status));
2419 return; 2443 return;
2420 2444
2421 case NDIS_80211_STATUSTYPE_MEDIASTREAMMODE: 2445 case NDIS_80211_STATUSTYPE_MEDIASTREAMMODE:
2422 devinfo(usbdev, "media stream mode indication: %i", 2446 netdev_info(usbdev->net, "media stream mode indication: %i\n",
2423 le32_to_cpu(indication->u.media_stream_mode)); 2447 le32_to_cpu(indication->u.media_stream_mode));
2424 return; 2448 return;
2425 2449
2426 case NDIS_80211_STATUSTYPE_AUTHENTICATION: 2450 case NDIS_80211_STATUSTYPE_AUTHENTICATION:
@@ -2432,9 +2456,8 @@ static void rndis_wlan_media_specific_indication(struct usbnet *usbdev,
2432 return; 2456 return;
2433 2457
2434 default: 2458 default:
2435 devinfo(usbdev, "media specific indication: " 2459 netdev_info(usbdev->net, "media specific indication: unknown status type 0x%08x\n",
2436 "unknown status type 0x%08x", 2460 le32_to_cpu(indication->status_type));
2437 le32_to_cpu(indication->status_type));
2438 } 2461 }
2439} 2462}
2440 2463
@@ -2451,14 +2474,13 @@ static void rndis_wlan_indication(struct usbnet *usbdev, void *ind, int buflen)
2451 * and userspace to think that device is 2474 * and userspace to think that device is
2452 * roaming/reassociating when it isn't. 2475 * roaming/reassociating when it isn't.
2453 */ 2476 */
2454 devdbg(usbdev, "ignored OID_802_11_ADD_KEY triggered " 2477 netdev_dbg(usbdev->net, "ignored OID_802_11_ADD_KEY triggered 'media connect'\n");
2455 "'media connect'");
2456 return; 2478 return;
2457 } 2479 }
2458 2480
2459 usbnet_pause_rx(usbdev); 2481 usbnet_pause_rx(usbdev);
2460 2482
2461 devinfo(usbdev, "media connect"); 2483 netdev_info(usbdev->net, "media connect\n");
2462 2484
2463 /* queue work to avoid recursive calls into rndis_command */ 2485 /* queue work to avoid recursive calls into rndis_command */
2464 set_bit(WORK_LINK_UP, &priv->work_pending); 2486 set_bit(WORK_LINK_UP, &priv->work_pending);
@@ -2466,7 +2488,7 @@ static void rndis_wlan_indication(struct usbnet *usbdev, void *ind, int buflen)
2466 break; 2488 break;
2467 2489
2468 case RNDIS_STATUS_MEDIA_DISCONNECT: 2490 case RNDIS_STATUS_MEDIA_DISCONNECT:
2469 devinfo(usbdev, "media disconnect"); 2491 netdev_info(usbdev->net, "media disconnect\n");
2470 2492
2471 /* queue work to avoid recursive calls into rndis_command */ 2493 /* queue work to avoid recursive calls into rndis_command */
2472 set_bit(WORK_LINK_DOWN, &priv->work_pending); 2494 set_bit(WORK_LINK_DOWN, &priv->work_pending);
@@ -2478,8 +2500,8 @@ static void rndis_wlan_indication(struct usbnet *usbdev, void *ind, int buflen)
2478 break; 2500 break;
2479 2501
2480 default: 2502 default:
2481 devinfo(usbdev, "indication: 0x%08x", 2503 netdev_info(usbdev->net, "indication: 0x%08x\n",
2482 le32_to_cpu(msg->status)); 2504 le32_to_cpu(msg->status));
2483 break; 2505 break;
2484 } 2506 }
2485} 2507}
@@ -2544,8 +2566,8 @@ static void rndis_device_poller(struct work_struct *work)
2544 if (ret == 0) 2566 if (ret == 0)
2545 priv->last_qual = level_to_qual(le32_to_cpu(rssi)); 2567 priv->last_qual = level_to_qual(le32_to_cpu(rssi));
2546 2568
2547 devdbg(usbdev, "dev-poller: OID_802_11_RSSI -> %d, rssi:%d, qual: %d", 2569 netdev_dbg(usbdev->net, "dev-poller: OID_802_11_RSSI -> %d, rssi:%d, qual: %d\n",
2548 ret, le32_to_cpu(rssi), level_to_qual(le32_to_cpu(rssi))); 2570 ret, le32_to_cpu(rssi), level_to_qual(le32_to_cpu(rssi)));
2549 2571
2550 /* Workaround transfer stalls on poor quality links. 2572 /* Workaround transfer stalls on poor quality links.
2551 * TODO: find right way to fix these stalls (as stalls do not happen 2573 * TODO: find right way to fix these stalls (as stalls do not happen
@@ -2594,23 +2616,9 @@ end:
2594/* 2616/*
2595 * driver/device initialization 2617 * driver/device initialization
2596 */ 2618 */
2597static int bcm4320a_early_init(struct usbnet *usbdev) 2619static void rndis_copy_module_params(struct usbnet *usbdev)
2598{
2599 /* bcm4320a doesn't handle configuration parameters well. Try
2600 * set any and you get partially zeroed mac and broken device.
2601 */
2602
2603 return 0;
2604}
2605
2606static int bcm4320b_early_init(struct usbnet *usbdev)
2607{ 2620{
2608 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 2621 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2609 char buf[8];
2610
2611 /* Early initialization settings, setting these won't have effect
2612 * if called after generic_rndis_bind().
2613 */
2614 2622
2615 priv->param_country[0] = modparam_country[0]; 2623 priv->param_country[0] = modparam_country[0];
2616 priv->param_country[1] = modparam_country[1]; 2624 priv->param_country[1] = modparam_country[1];
@@ -2652,6 +2660,32 @@ static int bcm4320b_early_init(struct usbnet *usbdev)
2652 priv->param_workaround_interval = 500; 2660 priv->param_workaround_interval = 500;
2653 else 2661 else
2654 priv->param_workaround_interval = modparam_workaround_interval; 2662 priv->param_workaround_interval = modparam_workaround_interval;
2663}
2664
2665static int bcm4320a_early_init(struct usbnet *usbdev)
2666{
2667 /* copy module parameters for bcm4320a so that iwconfig reports txpower
2668 * and workaround parameter is copied to private structure correctly.
2669 */
2670 rndis_copy_module_params(usbdev);
2671
2672 /* bcm4320a doesn't handle configuration parameters well. Try
2673 * set any and you get partially zeroed mac and broken device.
2674 */
2675
2676 return 0;
2677}
2678
2679static int bcm4320b_early_init(struct usbnet *usbdev)
2680{
2681 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2682 char buf[8];
2683
2684 rndis_copy_module_params(usbdev);
2685
2686 /* Early initialization settings, setting these won't have effect
2687 * if called after generic_rndis_bind().
2688 */
2655 2689
2656 rndis_set_config_parameter_str(usbdev, "Country", priv->param_country); 2690 rndis_set_config_parameter_str(usbdev, "Country", priv->param_country);
2657 rndis_set_config_parameter_str(usbdev, "FrameBursting", 2691 rndis_set_config_parameter_str(usbdev, "FrameBursting",
@@ -2826,11 +2860,11 @@ static int rndis_wlan_reset(struct usbnet *usbdev)
2826 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 2860 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2827 int retval; 2861 int retval;
2828 2862
2829 devdbg(usbdev, "rndis_wlan_reset"); 2863 netdev_dbg(usbdev->net, "%s()\n", __func__);
2830 2864
2831 retval = rndis_reset(usbdev); 2865 retval = rndis_reset(usbdev);
2832 if (retval) 2866 if (retval)
2833 devwarn(usbdev, "rndis_reset() failed: %d", retval); 2867 netdev_warn(usbdev->net, "rndis_reset failed: %d\n", retval);
2834 2868
2835 /* rndis_reset cleared multicast list, so restore here. 2869 /* rndis_reset cleared multicast list, so restore here.
2836 (set_multicast_list() also turns on current packet filter) */ 2870 (set_multicast_list() also turns on current packet filter) */
@@ -2848,7 +2882,7 @@ static int rndis_wlan_stop(struct usbnet *usbdev)
2848 int retval; 2882 int retval;
2849 __le32 filter; 2883 __le32 filter;
2850 2884
2851 devdbg(usbdev, "rndis_wlan_stop"); 2885 netdev_dbg(usbdev->net, "%s()\n", __func__);
2852 2886
2853 retval = disassociate(usbdev, false); 2887 retval = disassociate(usbdev, false);
2854 2888
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index bf60689aaabb..5239e082cd0f 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -54,17 +54,17 @@ config RT61PCI
54 When compiled as a module, this driver will be called rt61pci. 54 When compiled as a module, this driver will be called rt61pci.
55 55
56config RT2800PCI_PCI 56config RT2800PCI_PCI
57 tristate 57 boolean
58 depends on PCI 58 depends on PCI
59 default y 59 default y
60 60
61config RT2800PCI_SOC 61config RT2800PCI_SOC
62 tristate 62 boolean
63 depends on RALINK_RT288X || RALINK_RT305X 63 depends on RALINK_RT288X || RALINK_RT305X
64 default y 64 default y
65 65
66config RT2800PCI 66config RT2800PCI
67 tristate "Ralink rt2800 (PCI/PCMCIA) support (VERY EXPERIMENTAL)" 67 tristate "Ralink rt28xx/rt30xx/rt35xx (PCI/PCIe/PCMCIA) support (EXPERIMENTAL)"
68 depends on (RT2800PCI_PCI || RT2800PCI_SOC) && EXPERIMENTAL 68 depends on (RT2800PCI_PCI || RT2800PCI_SOC) && EXPERIMENTAL
69 select RT2800_LIB 69 select RT2800_LIB
70 select RT2X00_LIB_PCI if RT2800PCI_PCI 70 select RT2X00_LIB_PCI if RT2800PCI_PCI
@@ -75,7 +75,7 @@ config RT2800PCI
75 select CRC_CCITT 75 select CRC_CCITT
76 select EEPROM_93CX6 76 select EEPROM_93CX6
77 ---help--- 77 ---help---
78 This adds support for rt2800 wireless chipset family. 78 This adds support for rt2800/rt3000/rt3500 wireless chipset family.
79 Supported chips: RT2760, RT2790, RT2860, RT2880, RT2890 & RT3052 79 Supported chips: RT2760, RT2790, RT2860, RT2880, RT2890 & RT3052
80 80
81 This driver is non-functional at the moment and is intended for 81 This driver is non-functional at the moment and is intended for
@@ -83,6 +83,32 @@ config RT2800PCI
83 83
84 When compiled as a module, this driver will be called "rt2800pci.ko". 84 When compiled as a module, this driver will be called "rt2800pci.ko".
85 85
86if RT2800PCI
87
88config RT2800PCI_RT30XX
89 bool "rt2800pci - Include support for rt30xx (PCI/PCIe/PCMCIA) devices"
90 default n
91 ---help---
92 This adds support for rt30xx wireless chipset family to the
93 rt2800pci driver.
94 Supported chips: RT3090, RT3091 & RT3092
95
96 Support for these devices is non-functional at the moment and is
97 intended for testers and developers.
98
99config RT2800PCI_RT35XX
100 bool "rt2800pci - Include support for rt35xx (PCI/PCIe/PCMCIA) devices"
101 default n
102 ---help---
103 This adds support for rt35xx wireless chipset family to the
104 rt2800pci driver.
105 Supported chips: RT3060, RT3062, RT3562, RT3592
106
107 Support for these devices is non-functional at the moment and is
108 intended for testers and developers.
109
110endif
111
86config RT2500USB 112config RT2500USB
87 tristate "Ralink rt2500 (USB) support" 113 tristate "Ralink rt2500 (USB) support"
88 depends on USB 114 depends on USB
@@ -126,6 +152,43 @@ config RT2800USB
126 152
127 When compiled as a module, this driver will be called "rt2800usb.ko". 153 When compiled as a module, this driver will be called "rt2800usb.ko".
128 154
155if RT2800USB
156
157config RT2800USB_RT30XX
158 bool "rt2800usb - Include support for rt30xx (USB) devices"
159 default n
160 ---help---
161 This adds support for rt30xx wireless chipset family to the
162 rt2800usb driver.
163 Supported chips: RT3070, RT3071 & RT3072
164
165 Support for these devices is non-functional at the moment and is
166 intended for testers and developers.
167
168config RT2800USB_RT35XX
169 bool "rt2800usb - Include support for rt35xx (USB) devices"
170 default n
171 ---help---
172 This adds support for rt35xx wireless chipset family to the
173 rt2800usb driver.
174 Supported chips: RT3572
175
176 Support for these devices is non-functional at the moment and is
177 intended for testers and developers.
178
179config RT2800USB_UNKNOWN
180 bool "rt2800usb - Include support for unknown (USB) devices"
181 default n
182 ---help---
183 This adds support for rt2800 family devices that are known to
184 have a rt2800 family chipset, but for which the exact chipset
185 is unknown.
186
187 Support status for these devices is unknown, and enabling these
188 devices may or may not work.
189
190endif
191
129config RT2800_LIB 192config RT2800_LIB
130 tristate 193 tristate
131 194
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index e7f46405a418..c22b04042d5c 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -451,7 +451,7 @@ static void rt2400pci_config_channel(struct rt2x00_dev *rt2x00dev,
451 /* 451 /*
452 * RF2420 chipset don't need any additional actions. 452 * RF2420 chipset don't need any additional actions.
453 */ 453 */
454 if (rt2x00_rf(&rt2x00dev->chip, RF2420)) 454 if (rt2x00_rf(rt2x00dev, RF2420))
455 return; 455 return;
456 456
457 /* 457 /*
@@ -1340,11 +1340,10 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1340 */ 1340 */
1341 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); 1341 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
1342 rt2x00pci_register_read(rt2x00dev, CSR0, &reg); 1342 rt2x00pci_register_read(rt2x00dev, CSR0, &reg);
1343 rt2x00_set_chip_rf(rt2x00dev, value, reg); 1343 rt2x00_set_chip(rt2x00dev, RT2460, value,
1344 rt2x00_print_chip(rt2x00dev); 1344 rt2x00_get_field32(reg, CSR0_REVISION));
1345 1345
1346 if (!rt2x00_rf(&rt2x00dev->chip, RF2420) && 1346 if (!rt2x00_rf(rt2x00dev, RF2420) && !rt2x00_rf(rt2x00dev, RF2421)) {
1347 !rt2x00_rf(&rt2x00dev->chip, RF2421)) {
1348 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 1347 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
1349 return -ENODEV; 1348 return -ENODEV;
1350 } 1349 }
@@ -1562,7 +1561,6 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
1562 .get_stats = rt2x00mac_get_stats, 1561 .get_stats = rt2x00mac_get_stats,
1563 .bss_info_changed = rt2x00mac_bss_info_changed, 1562 .bss_info_changed = rt2x00mac_bss_info_changed,
1564 .conf_tx = rt2400pci_conf_tx, 1563 .conf_tx = rt2400pci_conf_tx,
1565 .get_tx_stats = rt2x00mac_get_tx_stats,
1566 .get_tsf = rt2400pci_get_tsf, 1564 .get_tsf = rt2400pci_get_tsf,
1567 .tx_last_beacon = rt2400pci_tx_last_beacon, 1565 .tx_last_beacon = rt2400pci_tx_last_beacon,
1568 .rfkill_poll = rt2x00mac_rfkill_poll, 1566 .rfkill_poll = rt2x00mac_rfkill_poll,
@@ -1643,7 +1641,7 @@ static const struct rt2x00_ops rt2400pci_ops = {
1643/* 1641/*
1644 * RT2400pci module information. 1642 * RT2400pci module information.
1645 */ 1643 */
1646static struct pci_device_id rt2400pci_device_table[] = { 1644static DEFINE_PCI_DEVICE_TABLE(rt2400pci_device_table) = {
1647 { PCI_DEVICE(0x1814, 0x0101), PCI_DEVICE_DATA(&rt2400pci_ops) }, 1645 { PCI_DEVICE(0x1814, 0x0101), PCI_DEVICE_DATA(&rt2400pci_ops) },
1648 { 0, } 1646 { 0, }
1649}; 1647};
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index c3dea697b907..c048b18f4133 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -65,6 +65,7 @@
65 * CSR0: ASIC revision number. 65 * CSR0: ASIC revision number.
66 */ 66 */
67#define CSR0 0x0000 67#define CSR0 0x0000
68#define CSR0_REVISION FIELD32(0x0000ffff)
68 69
69/* 70/*
70 * CSR1: System control register. 71 * CSR1: System control register.
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 408fcfc120f5..52bbcf1bd17c 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -440,8 +440,7 @@ static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev,
440 /* 440 /*
441 * RT2525E and RT5222 need to flip TX I/Q 441 * RT2525E and RT5222 need to flip TX I/Q
442 */ 442 */
443 if (rt2x00_rf(&rt2x00dev->chip, RF2525E) || 443 if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) {
444 rt2x00_rf(&rt2x00dev->chip, RF5222)) {
445 rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1); 444 rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1);
446 rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 1); 445 rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 1);
447 rt2x00_set_field32(&reg, BBPCSR1_OFDM_FLIP, 1); 446 rt2x00_set_field32(&reg, BBPCSR1_OFDM_FLIP, 1);
@@ -449,7 +448,7 @@ static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev,
449 /* 448 /*
450 * RT2525E does not need RX I/Q Flip. 449 * RT2525E does not need RX I/Q Flip.
451 */ 450 */
452 if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) 451 if (rt2x00_rf(rt2x00dev, RF2525E))
453 rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0); 452 rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0);
454 } else { 453 } else {
455 rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 0); 454 rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 0);
@@ -475,14 +474,14 @@ static void rt2500pci_config_channel(struct rt2x00_dev *rt2x00dev,
475 * Switch on tuning bits. 474 * Switch on tuning bits.
476 * For RT2523 devices we do not need to update the R1 register. 475 * For RT2523 devices we do not need to update the R1 register.
477 */ 476 */
478 if (!rt2x00_rf(&rt2x00dev->chip, RF2523)) 477 if (!rt2x00_rf(rt2x00dev, RF2523))
479 rt2x00_set_field32(&rf->rf1, RF1_TUNER, 1); 478 rt2x00_set_field32(&rf->rf1, RF1_TUNER, 1);
480 rt2x00_set_field32(&rf->rf3, RF3_TUNER, 1); 479 rt2x00_set_field32(&rf->rf3, RF3_TUNER, 1);
481 480
482 /* 481 /*
483 * For RT2525 we should first set the channel to half band higher. 482 * For RT2525 we should first set the channel to half band higher.
484 */ 483 */
485 if (rt2x00_rf(&rt2x00dev->chip, RF2525)) { 484 if (rt2x00_rf(rt2x00dev, RF2525)) {
486 static const u32 vals[] = { 485 static const u32 vals[] = {
487 0x00080cbe, 0x00080d02, 0x00080d06, 0x00080d0a, 486 0x00080cbe, 0x00080d02, 0x00080d06, 0x00080d0a,
488 0x00080d0e, 0x00080d12, 0x00080d16, 0x00080d1a, 487 0x00080d0e, 0x00080d12, 0x00080d16, 0x00080d1a,
@@ -516,7 +515,7 @@ static void rt2500pci_config_channel(struct rt2x00_dev *rt2x00dev,
516 * Switch off tuning bits. 515 * Switch off tuning bits.
517 * For RT2523 devices we do not need to update the R1 register. 516 * For RT2523 devices we do not need to update the R1 register.
518 */ 517 */
519 if (!rt2x00_rf(&rt2x00dev->chip, RF2523)) { 518 if (!rt2x00_rf(rt2x00dev, RF2523)) {
520 rt2x00_set_field32(&rf->rf1, RF1_TUNER, 0); 519 rt2x00_set_field32(&rf->rf1, RF1_TUNER, 0);
521 rt2500pci_rf_write(rt2x00dev, 1, rf->rf1); 520 rt2500pci_rf_write(rt2x00dev, 1, rf->rf1);
522 } 521 }
@@ -640,7 +639,7 @@ static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev,
640 * up to version C the link tuning should halt after 20 639 * up to version C the link tuning should halt after 20
641 * seconds while being associated. 640 * seconds while being associated.
642 */ 641 */
643 if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D && 642 if (rt2x00_rev(rt2x00dev) < RT2560_VERSION_D &&
644 rt2x00dev->intf_associated && count > 20) 643 rt2x00dev->intf_associated && count > 20)
645 return; 644 return;
646 645
@@ -650,7 +649,7 @@ static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev,
650 * should go straight to dynamic CCA tuning when they 649 * should go straight to dynamic CCA tuning when they
651 * are not associated. 650 * are not associated.
652 */ 651 */
653 if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D || 652 if (rt2x00_rev(rt2x00dev) < RT2560_VERSION_D ||
654 !rt2x00dev->intf_associated) 653 !rt2x00dev->intf_associated)
655 goto dynamic_cca_tune; 654 goto dynamic_cca_tune;
656 655
@@ -1504,15 +1503,15 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1504 */ 1503 */
1505 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); 1504 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
1506 rt2x00pci_register_read(rt2x00dev, CSR0, &reg); 1505 rt2x00pci_register_read(rt2x00dev, CSR0, &reg);
1507 rt2x00_set_chip_rf(rt2x00dev, value, reg); 1506 rt2x00_set_chip(rt2x00dev, RT2560, value,
1508 rt2x00_print_chip(rt2x00dev); 1507 rt2x00_get_field32(reg, CSR0_REVISION));
1509 1508
1510 if (!rt2x00_rf(&rt2x00dev->chip, RF2522) && 1509 if (!rt2x00_rf(rt2x00dev, RF2522) &&
1511 !rt2x00_rf(&rt2x00dev->chip, RF2523) && 1510 !rt2x00_rf(rt2x00dev, RF2523) &&
1512 !rt2x00_rf(&rt2x00dev->chip, RF2524) && 1511 !rt2x00_rf(rt2x00dev, RF2524) &&
1513 !rt2x00_rf(&rt2x00dev->chip, RF2525) && 1512 !rt2x00_rf(rt2x00dev, RF2525) &&
1514 !rt2x00_rf(&rt2x00dev->chip, RF2525E) && 1513 !rt2x00_rf(rt2x00dev, RF2525E) &&
1515 !rt2x00_rf(&rt2x00dev->chip, RF5222)) { 1514 !rt2x00_rf(rt2x00dev, RF5222)) {
1516 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 1515 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
1517 return -ENODEV; 1516 return -ENODEV;
1518 } 1517 }
@@ -1744,22 +1743,22 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1744 spec->supported_bands = SUPPORT_BAND_2GHZ; 1743 spec->supported_bands = SUPPORT_BAND_2GHZ;
1745 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 1744 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
1746 1745
1747 if (rt2x00_rf(&rt2x00dev->chip, RF2522)) { 1746 if (rt2x00_rf(rt2x00dev, RF2522)) {
1748 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522); 1747 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
1749 spec->channels = rf_vals_bg_2522; 1748 spec->channels = rf_vals_bg_2522;
1750 } else if (rt2x00_rf(&rt2x00dev->chip, RF2523)) { 1749 } else if (rt2x00_rf(rt2x00dev, RF2523)) {
1751 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523); 1750 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523);
1752 spec->channels = rf_vals_bg_2523; 1751 spec->channels = rf_vals_bg_2523;
1753 } else if (rt2x00_rf(&rt2x00dev->chip, RF2524)) { 1752 } else if (rt2x00_rf(rt2x00dev, RF2524)) {
1754 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524); 1753 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524);
1755 spec->channels = rf_vals_bg_2524; 1754 spec->channels = rf_vals_bg_2524;
1756 } else if (rt2x00_rf(&rt2x00dev->chip, RF2525)) { 1755 } else if (rt2x00_rf(rt2x00dev, RF2525)) {
1757 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525); 1756 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525);
1758 spec->channels = rf_vals_bg_2525; 1757 spec->channels = rf_vals_bg_2525;
1759 } else if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) { 1758 } else if (rt2x00_rf(rt2x00dev, RF2525E)) {
1760 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e); 1759 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e);
1761 spec->channels = rf_vals_bg_2525e; 1760 spec->channels = rf_vals_bg_2525e;
1762 } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) { 1761 } else if (rt2x00_rf(rt2x00dev, RF5222)) {
1763 spec->supported_bands |= SUPPORT_BAND_5GHZ; 1762 spec->supported_bands |= SUPPORT_BAND_5GHZ;
1764 spec->num_channels = ARRAY_SIZE(rf_vals_5222); 1763 spec->num_channels = ARRAY_SIZE(rf_vals_5222);
1765 spec->channels = rf_vals_5222; 1764 spec->channels = rf_vals_5222;
@@ -1860,7 +1859,6 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
1860 .get_stats = rt2x00mac_get_stats, 1859 .get_stats = rt2x00mac_get_stats,
1861 .bss_info_changed = rt2x00mac_bss_info_changed, 1860 .bss_info_changed = rt2x00mac_bss_info_changed,
1862 .conf_tx = rt2x00mac_conf_tx, 1861 .conf_tx = rt2x00mac_conf_tx,
1863 .get_tx_stats = rt2x00mac_get_tx_stats,
1864 .get_tsf = rt2500pci_get_tsf, 1862 .get_tsf = rt2500pci_get_tsf,
1865 .tx_last_beacon = rt2500pci_tx_last_beacon, 1863 .tx_last_beacon = rt2500pci_tx_last_beacon,
1866 .rfkill_poll = rt2x00mac_rfkill_poll, 1864 .rfkill_poll = rt2x00mac_rfkill_poll,
@@ -1941,7 +1939,7 @@ static const struct rt2x00_ops rt2500pci_ops = {
1941/* 1939/*
1942 * RT2500pci module information. 1940 * RT2500pci module information.
1943 */ 1941 */
1944static struct pci_device_id rt2500pci_device_table[] = { 1942static DEFINE_PCI_DEVICE_TABLE(rt2500pci_device_table) = {
1945 { PCI_DEVICE(0x1814, 0x0201), PCI_DEVICE_DATA(&rt2500pci_ops) }, 1943 { PCI_DEVICE(0x1814, 0x0201), PCI_DEVICE_DATA(&rt2500pci_ops) },
1946 { 0, } 1944 { 0, }
1947}; 1945};
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index c6bd1fcae7eb..d708031361ac 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -76,6 +76,7 @@
76 * CSR0: ASIC revision number. 76 * CSR0: ASIC revision number.
77 */ 77 */
78#define CSR0 0x0000 78#define CSR0 0x0000
79#define CSR0_REVISION FIELD32(0x0000ffff)
79 80
80/* 81/*
81 * CSR1: System control register. 82 * CSR1: System control register.
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 83f2592c59de..ee34c137e7cd 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -565,8 +565,7 @@ static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev,
565 /* 565 /*
566 * RT2525E and RT5222 need to flip TX I/Q 566 * RT2525E and RT5222 need to flip TX I/Q
567 */ 567 */
568 if (rt2x00_rf(&rt2x00dev->chip, RF2525E) || 568 if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) {
569 rt2x00_rf(&rt2x00dev->chip, RF5222)) {
570 rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1); 569 rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1);
571 rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 1); 570 rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 1);
572 rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 1); 571 rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 1);
@@ -574,7 +573,7 @@ static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev,
574 /* 573 /*
575 * RT2525E does not need RX I/Q Flip. 574 * RT2525E does not need RX I/Q Flip.
576 */ 575 */
577 if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) 576 if (rt2x00_rf(rt2x00dev, RF2525E))
578 rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0); 577 rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0);
579 } else { 578 } else {
580 rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 0); 579 rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 0);
@@ -598,7 +597,7 @@ static void rt2500usb_config_channel(struct rt2x00_dev *rt2x00dev,
598 /* 597 /*
599 * For RT2525E we should first set the channel to half band higher. 598 * For RT2525E we should first set the channel to half band higher.
600 */ 599 */
601 if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) { 600 if (rt2x00_rf(rt2x00dev, RF2525E)) {
602 static const u32 vals[] = { 601 static const u32 vals[] = {
603 0x000008aa, 0x000008ae, 0x000008ae, 0x000008b2, 602 0x000008aa, 0x000008ae, 0x000008ae, 0x000008b2,
604 0x000008b2, 0x000008b6, 0x000008b6, 0x000008ba, 603 0x000008b2, 0x000008b6, 0x000008b6, 0x000008ba,
@@ -793,7 +792,7 @@ static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev)
793 rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 1); 792 rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 1);
794 rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg); 793 rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg);
795 794
796 if (rt2x00_rev(&rt2x00dev->chip) >= RT2570_VERSION_C) { 795 if (rt2x00_rev(rt2x00dev) >= RT2570_VERSION_C) {
797 rt2500usb_register_read(rt2x00dev, PHY_CSR2, &reg); 796 rt2500usb_register_read(rt2x00dev, PHY_CSR2, &reg);
798 rt2x00_set_field16(&reg, PHY_CSR2_LNA, 0); 797 rt2x00_set_field16(&reg, PHY_CSR2_LNA, 0);
799 } else { 798 } else {
@@ -1409,21 +1408,18 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1409 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); 1408 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
1410 rt2500usb_register_read(rt2x00dev, MAC_CSR0, &reg); 1409 rt2500usb_register_read(rt2x00dev, MAC_CSR0, &reg);
1411 rt2x00_set_chip(rt2x00dev, RT2570, value, reg); 1410 rt2x00_set_chip(rt2x00dev, RT2570, value, reg);
1412 rt2x00_print_chip(rt2x00dev);
1413
1414 if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0) ||
1415 rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) {
1416 1411
1412 if (((reg & 0xfff0) != 0) || ((reg & 0x0000000f) == 0)) {
1417 ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); 1413 ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
1418 return -ENODEV; 1414 return -ENODEV;
1419 } 1415 }
1420 1416
1421 if (!rt2x00_rf(&rt2x00dev->chip, RF2522) && 1417 if (!rt2x00_rf(rt2x00dev, RF2522) &&
1422 !rt2x00_rf(&rt2x00dev->chip, RF2523) && 1418 !rt2x00_rf(rt2x00dev, RF2523) &&
1423 !rt2x00_rf(&rt2x00dev->chip, RF2524) && 1419 !rt2x00_rf(rt2x00dev, RF2524) &&
1424 !rt2x00_rf(&rt2x00dev->chip, RF2525) && 1420 !rt2x00_rf(rt2x00dev, RF2525) &&
1425 !rt2x00_rf(&rt2x00dev->chip, RF2525E) && 1421 !rt2x00_rf(rt2x00dev, RF2525E) &&
1426 !rt2x00_rf(&rt2x00dev->chip, RF5222)) { 1422 !rt2x00_rf(rt2x00dev, RF5222)) {
1427 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 1423 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
1428 return -ENODEV; 1424 return -ENODEV;
1429 } 1425 }
@@ -1667,22 +1663,22 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1667 spec->supported_bands = SUPPORT_BAND_2GHZ; 1663 spec->supported_bands = SUPPORT_BAND_2GHZ;
1668 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 1664 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
1669 1665
1670 if (rt2x00_rf(&rt2x00dev->chip, RF2522)) { 1666 if (rt2x00_rf(rt2x00dev, RF2522)) {
1671 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522); 1667 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
1672 spec->channels = rf_vals_bg_2522; 1668 spec->channels = rf_vals_bg_2522;
1673 } else if (rt2x00_rf(&rt2x00dev->chip, RF2523)) { 1669 } else if (rt2x00_rf(rt2x00dev, RF2523)) {
1674 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523); 1670 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523);
1675 spec->channels = rf_vals_bg_2523; 1671 spec->channels = rf_vals_bg_2523;
1676 } else if (rt2x00_rf(&rt2x00dev->chip, RF2524)) { 1672 } else if (rt2x00_rf(rt2x00dev, RF2524)) {
1677 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524); 1673 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524);
1678 spec->channels = rf_vals_bg_2524; 1674 spec->channels = rf_vals_bg_2524;
1679 } else if (rt2x00_rf(&rt2x00dev->chip, RF2525)) { 1675 } else if (rt2x00_rf(rt2x00dev, RF2525)) {
1680 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525); 1676 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525);
1681 spec->channels = rf_vals_bg_2525; 1677 spec->channels = rf_vals_bg_2525;
1682 } else if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) { 1678 } else if (rt2x00_rf(rt2x00dev, RF2525E)) {
1683 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e); 1679 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e);
1684 spec->channels = rf_vals_bg_2525e; 1680 spec->channels = rf_vals_bg_2525e;
1685 } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) { 1681 } else if (rt2x00_rf(rt2x00dev, RF5222)) {
1686 spec->supported_bands |= SUPPORT_BAND_5GHZ; 1682 spec->supported_bands |= SUPPORT_BAND_5GHZ;
1687 spec->num_channels = ARRAY_SIZE(rf_vals_5222); 1683 spec->num_channels = ARRAY_SIZE(rf_vals_5222);
1688 spec->channels = rf_vals_5222; 1684 spec->channels = rf_vals_5222;
@@ -1763,7 +1759,6 @@ static const struct ieee80211_ops rt2500usb_mac80211_ops = {
1763 .get_stats = rt2x00mac_get_stats, 1759 .get_stats = rt2x00mac_get_stats,
1764 .bss_info_changed = rt2x00mac_bss_info_changed, 1760 .bss_info_changed = rt2x00mac_bss_info_changed,
1765 .conf_tx = rt2x00mac_conf_tx, 1761 .conf_tx = rt2x00mac_conf_tx,
1766 .get_tx_stats = rt2x00mac_get_tx_stats,
1767 .rfkill_poll = rt2x00mac_rfkill_poll, 1762 .rfkill_poll = rt2x00mac_rfkill_poll,
1768}; 1763};
1769 1764
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 1a7eae357fef..74c0433dba37 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -60,11 +60,11 @@
60/* 60/*
61 * Chipset version. 61 * Chipset version.
62 */ 62 */
63#define RT2860C_VERSION 0x28600100 63#define RT2860C_VERSION 0x0100
64#define RT2860D_VERSION 0x28600101 64#define RT2860D_VERSION 0x0101
65#define RT2880E_VERSION 0x28720200 65#define RT2880E_VERSION 0x0200
66#define RT2883_VERSION 0x28830300 66#define RT2883_VERSION 0x0300
67#define RT3070_VERSION 0x30700200 67#define RT3070_VERSION 0x0200
68 68
69/* 69/*
70 * Signal information. 70 * Signal information.
@@ -408,8 +408,8 @@
408 * ASIC_VER: 2860 or 2870 408 * ASIC_VER: 2860 or 2870
409 */ 409 */
410#define MAC_CSR0 0x1000 410#define MAC_CSR0 0x1000
411#define MAC_CSR0_ASIC_REV FIELD32(0x0000ffff) 411#define MAC_CSR0_REVISION FIELD32(0x0000ffff)
412#define MAC_CSR0_ASIC_VER FIELD32(0xffff0000) 412#define MAC_CSR0_CHIPSET FIELD32(0xffff0000)
413 413
414/* 414/*
415 * MAC_SYS_CTRL: 415 * MAC_SYS_CTRL:
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 9deae41cb784..18d4d8e4ae6b 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -37,9 +37,12 @@
37#include <linux/module.h> 37#include <linux/module.h>
38 38
39#include "rt2x00.h" 39#include "rt2x00.h"
40#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE) 40#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
41#include "rt2x00usb.h" 41#include "rt2x00usb.h"
42#endif 42#endif
43#if defined(CONFIG_RT2X00_LIB_PCI) || defined(CONFIG_RT2X00_LIB_PCI_MODULE)
44#include "rt2x00pci.h"
45#endif
43#include "rt2800lib.h" 46#include "rt2800lib.h"
44#include "rt2800.h" 47#include "rt2800.h"
45#include "rt2800usb.h" 48#include "rt2800usb.h"
@@ -89,7 +92,7 @@ static void rt2800_bbp_write(struct rt2x00_dev *rt2x00dev,
89 rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word); 92 rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
90 rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1); 93 rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
91 rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 0); 94 rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 0);
92 if (rt2x00_intf_is_pci(rt2x00dev)) 95 if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
93 rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1); 96 rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
94 97
95 rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg); 98 rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
@@ -118,7 +121,7 @@ static void rt2800_bbp_read(struct rt2x00_dev *rt2x00dev,
118 rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word); 121 rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
119 rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1); 122 rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
120 rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 1); 123 rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 1);
121 if (rt2x00_intf_is_pci(rt2x00dev)) 124 if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
122 rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1); 125 rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
123 126
124 rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg); 127 rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
@@ -218,10 +221,9 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
218 u32 reg; 221 u32 reg;
219 222
220 /* 223 /*
221 * RT2880 and RT3052 don't support MCU requests. 224 * SOC devices don't support MCU requests.
222 */ 225 */
223 if (rt2x00_rt(&rt2x00dev->chip, RT2880) || 226 if (rt2x00_is_soc(rt2x00dev))
224 rt2x00_rt(&rt2x00dev->chip, RT3052))
225 return; 227 return;
226 228
227 mutex_lock(&rt2x00dev->csr_mutex); 229 mutex_lock(&rt2x00dev->csr_mutex);
@@ -246,6 +248,25 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
246} 248}
247EXPORT_SYMBOL_GPL(rt2800_mcu_request); 249EXPORT_SYMBOL_GPL(rt2800_mcu_request);
248 250
251int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
252{
253 unsigned int i;
254 u32 reg;
255
256 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
257 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
258 if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
259 !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
260 return 0;
261
262 msleep(1);
263 }
264
265 ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
266 return -EACCES;
267}
268EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready);
269
249#ifdef CONFIG_RT2X00_LIB_DEBUGFS 270#ifdef CONFIG_RT2X00_LIB_DEBUGFS
250const struct rt2x00debug rt2800_rt2x00debug = { 271const struct rt2x00debug rt2800_rt2x00debug = {
251 .owner = THIS_MODULE, 272 .owner = THIS_MODULE,
@@ -348,7 +369,7 @@ static int rt2800_blink_set(struct led_classdev *led_cdev,
348 return 0; 369 return 0;
349} 370}
350 371
351void rt2800_init_led(struct rt2x00_dev *rt2x00dev, 372static void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
352 struct rt2x00_led *led, enum led_type type) 373 struct rt2x00_led *led, enum led_type type)
353{ 374{
354 led->rt2x00dev = rt2x00dev; 375 led->rt2x00dev = rt2x00dev;
@@ -357,7 +378,6 @@ void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
357 led->led_dev.blink_set = rt2800_blink_set; 378 led->led_dev.blink_set = rt2800_blink_set;
358 led->flags = LED_INITIALIZED; 379 led->flags = LED_INITIALIZED;
359} 380}
360EXPORT_SYMBOL_GPL(rt2800_init_led);
361#endif /* CONFIG_RT2X00_LIB_LEDS */ 381#endif /* CONFIG_RT2X00_LIB_LEDS */
362 382
363/* 383/*
@@ -643,7 +663,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
643 switch ((int)ant->tx) { 663 switch ((int)ant->tx) {
644 case 1: 664 case 1:
645 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0); 665 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
646 if (rt2x00_intf_is_pci(rt2x00dev)) 666 if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
647 rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0); 667 rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
648 break; 668 break;
649 case 2: 669 case 2:
@@ -806,12 +826,12 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
806 unsigned int tx_pin; 826 unsigned int tx_pin;
807 u8 bbp; 827 u8 bbp;
808 828
809 if ((rt2x00_rt(&rt2x00dev->chip, RT3070) || 829 if ((rt2x00_rt(rt2x00dev, RT3070) ||
810 rt2x00_rt(&rt2x00dev->chip, RT3090)) && 830 rt2x00_rt(rt2x00dev, RT3090)) &&
811 (rt2x00_rf(&rt2x00dev->chip, RF2020) || 831 (rt2x00_rf(rt2x00dev, RF2020) ||
812 rt2x00_rf(&rt2x00dev->chip, RF3020) || 832 rt2x00_rf(rt2x00dev, RF3020) ||
813 rt2x00_rf(&rt2x00dev->chip, RF3021) || 833 rt2x00_rf(rt2x00dev, RF3021) ||
814 rt2x00_rf(&rt2x00dev->chip, RF3022))) 834 rt2x00_rf(rt2x00dev, RF3022)))
815 rt2800_config_channel_rt3x(rt2x00dev, conf, rf, info); 835 rt2800_config_channel_rt3x(rt2x00dev, conf, rf, info);
816 else 836 else
817 rt2800_config_channel_rt2x(rt2x00dev, conf, rf, info); 837 rt2800_config_channel_rt2x(rt2x00dev, conf, rf, info);
@@ -878,7 +898,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
878 rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf)); 898 rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf));
879 rt2800_bbp_write(rt2x00dev, 3, bbp); 899 rt2800_bbp_write(rt2x00dev, 3, bbp);
880 900
881 if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) { 901 if (rt2x00_rt(rt2x00dev, RT2860) &&
902 (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)) {
882 if (conf_is_ht40(conf)) { 903 if (conf_is_ht40(conf)) {
883 rt2800_bbp_write(rt2x00dev, 69, 0x1a); 904 rt2800_bbp_write(rt2x00dev, 69, 0x1a);
884 rt2800_bbp_write(rt2x00dev, 70, 0x0a); 905 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
@@ -1040,8 +1061,9 @@ EXPORT_SYMBOL_GPL(rt2800_link_stats);
1040static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev) 1061static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
1041{ 1062{
1042 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) { 1063 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
1043 if (rt2x00_intf_is_usb(rt2x00dev) && 1064 if (rt2x00_is_usb(rt2x00dev) &&
1044 rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) 1065 rt2x00_rt(rt2x00dev, RT3070) &&
1066 (rt2x00_rev(rt2x00dev) == RT3070_VERSION))
1045 return 0x1c + (2 * rt2x00dev->lna_gain); 1067 return 0x1c + (2 * rt2x00dev->lna_gain);
1046 else 1068 else
1047 return 0x2e + rt2x00dev->lna_gain; 1069 return 0x2e + rt2x00dev->lna_gain;
@@ -1072,7 +1094,8 @@ EXPORT_SYMBOL_GPL(rt2800_reset_tuner);
1072void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, 1094void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
1073 const u32 count) 1095 const u32 count)
1074{ 1096{
1075 if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) 1097 if (rt2x00_rt(rt2x00dev, RT2860) &&
1098 (rt2x00_rev(rt2x00dev) == RT2860C_VERSION))
1076 return; 1099 return;
1077 1100
1078 /* 1101 /*
@@ -1092,7 +1115,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1092 u32 reg; 1115 u32 reg;
1093 unsigned int i; 1116 unsigned int i;
1094 1117
1095 if (rt2x00_intf_is_usb(rt2x00dev)) { 1118 if (rt2x00_is_usb(rt2x00dev)) {
1096 /* 1119 /*
1097 * Wait until BBP and RF are ready. 1120 * Wait until BBP and RF are ready.
1098 */ 1121 */
@@ -1111,7 +1134,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1111 rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg); 1134 rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
1112 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 1135 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL,
1113 reg & ~0x00002000); 1136 reg & ~0x00002000);
1114 } else if (rt2x00_intf_is_pci(rt2x00dev)) 1137 } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
1115 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003); 1138 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
1116 1139
1117 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 1140 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
@@ -1119,9 +1142,9 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1119 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1); 1142 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
1120 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 1143 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
1121 1144
1122 if (rt2x00_intf_is_usb(rt2x00dev)) { 1145 if (rt2x00_is_usb(rt2x00dev)) {
1123 rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000); 1146 rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
1124#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE) 1147#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
1125 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, 1148 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
1126 USB_MODE_RESET, REGISTER_TIMEOUT); 1149 USB_MODE_RESET, REGISTER_TIMEOUT);
1127#endif 1150#endif
@@ -1157,8 +1180,9 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1157 rt2x00_set_field32(&reg, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0); 1180 rt2x00_set_field32(&reg, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0);
1158 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 1181 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
1159 1182
1160 if (rt2x00_intf_is_usb(rt2x00dev) && 1183 if (rt2x00_is_usb(rt2x00dev) &&
1161 rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) { 1184 rt2x00_rt(rt2x00dev, RT3070) &&
1185 (rt2x00_rev(rt2x00dev) == RT3070_VERSION)) {
1162 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); 1186 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
1163 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); 1187 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
1164 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); 1188 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -1185,8 +1209,14 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1185 1209
1186 rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg); 1210 rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
1187 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE); 1211 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
1188 if (rt2x00_rev(&rt2x00dev->chip) >= RT2880E_VERSION && 1212 if ((rt2x00_rt(rt2x00dev, RT2872) &&
1189 rt2x00_rev(&rt2x00dev->chip) < RT3070_VERSION) 1213 (rt2x00_rev(rt2x00dev) >= RT2880E_VERSION)) ||
1214 rt2x00_rt(rt2x00dev, RT2880) ||
1215 rt2x00_rt(rt2x00dev, RT2883) ||
1216 rt2x00_rt(rt2x00dev, RT2890) ||
1217 rt2x00_rt(rt2x00dev, RT3052) ||
1218 (rt2x00_rt(rt2x00dev, RT3070) &&
1219 (rt2x00_rev(rt2x00dev) < RT3070_VERSION)))
1190 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2); 1220 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
1191 else 1221 else
1192 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1); 1222 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
@@ -1276,7 +1306,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1276 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1); 1306 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
1277 rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg); 1307 rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg);
1278 1308
1279 if (rt2x00_intf_is_usb(rt2x00dev)) { 1309 if (rt2x00_is_usb(rt2x00dev)) {
1280 rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40006); 1310 rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40006);
1281 1311
1282 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg); 1312 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
@@ -1336,7 +1366,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1336 rt2800_register_write(rt2x00dev, HW_BEACON_BASE6, 0); 1366 rt2800_register_write(rt2x00dev, HW_BEACON_BASE6, 0);
1337 rt2800_register_write(rt2x00dev, HW_BEACON_BASE7, 0); 1367 rt2800_register_write(rt2x00dev, HW_BEACON_BASE7, 0);
1338 1368
1339 if (rt2x00_intf_is_usb(rt2x00dev)) { 1369 if (rt2x00_is_usb(rt2x00dev)) {
1340 rt2800_register_read(rt2x00dev, USB_CYC_CFG, &reg); 1370 rt2800_register_read(rt2x00dev, USB_CYC_CFG, &reg);
1341 rt2x00_set_field32(&reg, USB_CYC_CFG_CLOCK_CYCLE, 30); 1371 rt2x00_set_field32(&reg, USB_CYC_CFG_CLOCK_CYCLE, 30);
1342 rt2800_register_write(rt2x00dev, USB_CYC_CFG, reg); 1372 rt2800_register_write(rt2x00dev, USB_CYC_CFG, reg);
@@ -1465,22 +1495,25 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
1465 rt2800_bbp_write(rt2x00dev, 103, 0x00); 1495 rt2800_bbp_write(rt2x00dev, 103, 0x00);
1466 rt2800_bbp_write(rt2x00dev, 105, 0x05); 1496 rt2800_bbp_write(rt2x00dev, 105, 0x05);
1467 1497
1468 if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) { 1498 if (rt2x00_rt(rt2x00dev, RT2860) &&
1499 (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)) {
1469 rt2800_bbp_write(rt2x00dev, 69, 0x16); 1500 rt2800_bbp_write(rt2x00dev, 69, 0x16);
1470 rt2800_bbp_write(rt2x00dev, 73, 0x12); 1501 rt2800_bbp_write(rt2x00dev, 73, 0x12);
1471 } 1502 }
1472 1503
1473 if (rt2x00_rev(&rt2x00dev->chip) > RT2860D_VERSION) 1504 if (rt2x00_rt(rt2x00dev, RT2860) &&
1505 (rt2x00_rev(rt2x00dev) > RT2860D_VERSION))
1474 rt2800_bbp_write(rt2x00dev, 84, 0x19); 1506 rt2800_bbp_write(rt2x00dev, 84, 0x19);
1475 1507
1476 if (rt2x00_intf_is_usb(rt2x00dev) && 1508 if (rt2x00_is_usb(rt2x00dev) &&
1477 rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) { 1509 rt2x00_rt(rt2x00dev, RT3070) &&
1510 (rt2x00_rev(rt2x00dev) == RT3070_VERSION)) {
1478 rt2800_bbp_write(rt2x00dev, 70, 0x0a); 1511 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
1479 rt2800_bbp_write(rt2x00dev, 84, 0x99); 1512 rt2800_bbp_write(rt2x00dev, 84, 0x99);
1480 rt2800_bbp_write(rt2x00dev, 105, 0x05); 1513 rt2800_bbp_write(rt2x00dev, 105, 0x05);
1481 } 1514 }
1482 1515
1483 if (rt2x00_rt(&rt2x00dev->chip, RT3052)) { 1516 if (rt2x00_rt(rt2x00dev, RT3052)) {
1484 rt2800_bbp_write(rt2x00dev, 31, 0x08); 1517 rt2800_bbp_write(rt2x00dev, 31, 0x08);
1485 rt2800_bbp_write(rt2x00dev, 78, 0x0e); 1518 rt2800_bbp_write(rt2x00dev, 78, 0x0e);
1486 rt2800_bbp_write(rt2x00dev, 80, 0x08); 1519 rt2800_bbp_write(rt2x00dev, 80, 0x08);
@@ -1565,14 +1598,15 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
1565 u8 rfcsr; 1598 u8 rfcsr;
1566 u8 bbp; 1599 u8 bbp;
1567 1600
1568 if (rt2x00_intf_is_usb(rt2x00dev) && 1601 if (rt2x00_is_usb(rt2x00dev) &&
1569 rt2x00_rev(&rt2x00dev->chip) != RT3070_VERSION) 1602 rt2x00_rt(rt2x00dev, RT3070) &&
1603 (rt2x00_rev(rt2x00dev) != RT3070_VERSION))
1570 return 0; 1604 return 0;
1571 1605
1572 if (rt2x00_intf_is_pci(rt2x00dev)) { 1606 if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
1573 if (!rt2x00_rf(&rt2x00dev->chip, RF3020) && 1607 if (!rt2x00_rf(rt2x00dev, RF3020) &&
1574 !rt2x00_rf(&rt2x00dev->chip, RF3021) && 1608 !rt2x00_rf(rt2x00dev, RF3021) &&
1575 !rt2x00_rf(&rt2x00dev->chip, RF3022)) 1609 !rt2x00_rf(rt2x00dev, RF3022))
1576 return 0; 1610 return 0;
1577 } 1611 }
1578 1612
@@ -1586,7 +1620,7 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
1586 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0); 1620 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
1587 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); 1621 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
1588 1622
1589 if (rt2x00_intf_is_usb(rt2x00dev)) { 1623 if (rt2x00_is_usb(rt2x00dev)) {
1590 rt2800_rfcsr_write(rt2x00dev, 4, 0x40); 1624 rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
1591 rt2800_rfcsr_write(rt2x00dev, 5, 0x03); 1625 rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
1592 rt2800_rfcsr_write(rt2x00dev, 6, 0x02); 1626 rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
@@ -1607,7 +1641,7 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
1607 rt2800_rfcsr_write(rt2x00dev, 25, 0x01); 1641 rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
1608 rt2800_rfcsr_write(rt2x00dev, 27, 0x03); 1642 rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
1609 rt2800_rfcsr_write(rt2x00dev, 29, 0x1f); 1643 rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
1610 } else if (rt2x00_intf_is_pci(rt2x00dev)) { 1644 } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
1611 rt2800_rfcsr_write(rt2x00dev, 0, 0x50); 1645 rt2800_rfcsr_write(rt2x00dev, 0, 0x50);
1612 rt2800_rfcsr_write(rt2x00dev, 1, 0x01); 1646 rt2800_rfcsr_write(rt2x00dev, 1, 0x01);
1613 rt2800_rfcsr_write(rt2x00dev, 2, 0xf7); 1647 rt2800_rfcsr_write(rt2x00dev, 2, 0xf7);
@@ -1737,7 +1771,12 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1737 rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2820); 1771 rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2820);
1738 rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); 1772 rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
1739 EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); 1773 EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
1740 } else if (rt2x00_rev(&rt2x00dev->chip) < RT2883_VERSION) { 1774 } else if (rt2x00_rt(rt2x00dev, RT2860) ||
1775 rt2x00_rt(rt2x00dev, RT2870) ||
1776 rt2x00_rt(rt2x00dev, RT2872) ||
1777 rt2x00_rt(rt2x00dev, RT2880) ||
1778 (rt2x00_rt(rt2x00dev, RT2883) &&
1779 (rt2x00_rev(rt2x00dev) < RT2883_VERSION))) {
1741 /* 1780 /*
1742 * There is a max of 2 RX streams for RT28x0 series 1781 * There is a max of 2 RX streams for RT28x0 series
1743 */ 1782 */
@@ -1836,36 +1875,34 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
1836 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); 1875 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
1837 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg); 1876 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
1838 1877
1839 rt2x00_set_chip_rf(rt2x00dev, value, reg); 1878 rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
1840 1879 value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
1841 if (rt2x00_intf_is_usb(rt2x00dev)) { 1880
1842 struct rt2x00_chip *chip = &rt2x00dev->chip; 1881 if (!rt2x00_rt(rt2x00dev, RT2860) &&
1843 1882 !rt2x00_rt(rt2x00dev, RT2870) &&
1844 /* 1883 !rt2x00_rt(rt2x00dev, RT2872) &&
1845 * The check for rt2860 is not a typo, some rt2870 hardware 1884 !rt2x00_rt(rt2x00dev, RT2880) &&
1846 * identifies itself as rt2860 in the CSR register. 1885 !rt2x00_rt(rt2x00dev, RT2883) &&
1847 */ 1886 !rt2x00_rt(rt2x00dev, RT2890) &&
1848 if (rt2x00_check_rev(chip, 0xfff00000, 0x28600000) || 1887 !rt2x00_rt(rt2x00dev, RT3052) &&
1849 rt2x00_check_rev(chip, 0xfff00000, 0x28700000) || 1888 !rt2x00_rt(rt2x00dev, RT3070) &&
1850 rt2x00_check_rev(chip, 0xfff00000, 0x28800000)) { 1889 !rt2x00_rt(rt2x00dev, RT3071) &&
1851 rt2x00_set_chip_rt(rt2x00dev, RT2870); 1890 !rt2x00_rt(rt2x00dev, RT3090) &&
1852 } else if (rt2x00_check_rev(chip, 0xffff0000, 0x30700000)) { 1891 !rt2x00_rt(rt2x00dev, RT3390) &&
1853 rt2x00_set_chip_rt(rt2x00dev, RT3070); 1892 !rt2x00_rt(rt2x00dev, RT3572)) {
1854 } else { 1893 ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
1855 ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); 1894 return -ENODEV;
1856 return -ENODEV;
1857 }
1858 } 1895 }
1859 rt2x00_print_chip(rt2x00dev); 1896
1860 1897 if (!rt2x00_rf(rt2x00dev, RF2820) &&
1861 if (!rt2x00_rf(&rt2x00dev->chip, RF2820) && 1898 !rt2x00_rf(rt2x00dev, RF2850) &&
1862 !rt2x00_rf(&rt2x00dev->chip, RF2850) && 1899 !rt2x00_rf(rt2x00dev, RF2720) &&
1863 !rt2x00_rf(&rt2x00dev->chip, RF2720) && 1900 !rt2x00_rf(rt2x00dev, RF2750) &&
1864 !rt2x00_rf(&rt2x00dev->chip, RF2750) && 1901 !rt2x00_rf(rt2x00dev, RF3020) &&
1865 !rt2x00_rf(&rt2x00dev->chip, RF3020) && 1902 !rt2x00_rf(rt2x00dev, RF2020) &&
1866 !rt2x00_rf(&rt2x00dev->chip, RF2020) && 1903 !rt2x00_rf(rt2x00dev, RF3021) &&
1867 !rt2x00_rf(&rt2x00dev->chip, RF3021) && 1904 !rt2x00_rf(rt2x00dev, RF3022) &&
1868 !rt2x00_rf(&rt2x00dev->chip, RF3022)) { 1905 !rt2x00_rf(rt2x00dev, RF3052)) {
1869 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 1906 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
1870 return -ENODEV; 1907 return -ENODEV;
1871 } 1908 }
@@ -2013,7 +2050,6 @@ static const struct rf_channel rf_vals_302x[] = {
2013 2050
2014int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) 2051int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2015{ 2052{
2016 struct rt2x00_chip *chip = &rt2x00dev->chip;
2017 struct hw_mode_spec *spec = &rt2x00dev->spec; 2053 struct hw_mode_spec *spec = &rt2x00dev->spec;
2018 struct channel_info *info; 2054 struct channel_info *info;
2019 char *tx_power1; 2055 char *tx_power1;
@@ -2024,7 +2060,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2024 /* 2060 /*
2025 * Disable powersaving as default on PCI devices. 2061 * Disable powersaving as default on PCI devices.
2026 */ 2062 */
2027 if (rt2x00_intf_is_pci(rt2x00dev)) 2063 if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
2028 rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 2064 rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
2029 2065
2030 /* 2066 /*
@@ -2049,19 +2085,19 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2049 spec->supported_bands = SUPPORT_BAND_2GHZ; 2085 spec->supported_bands = SUPPORT_BAND_2GHZ;
2050 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 2086 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
2051 2087
2052 if (rt2x00_rf(chip, RF2820) || 2088 if (rt2x00_rf(rt2x00dev, RF2820) ||
2053 rt2x00_rf(chip, RF2720) || 2089 rt2x00_rf(rt2x00dev, RF2720) ||
2054 (rt2x00_intf_is_pci(rt2x00dev) && rt2x00_rf(chip, RF3052))) { 2090 rt2x00_rf(rt2x00dev, RF3052)) {
2055 spec->num_channels = 14; 2091 spec->num_channels = 14;
2056 spec->channels = rf_vals; 2092 spec->channels = rf_vals;
2057 } else if (rt2x00_rf(chip, RF2850) || rt2x00_rf(chip, RF2750)) { 2093 } else if (rt2x00_rf(rt2x00dev, RF2850) || rt2x00_rf(rt2x00dev, RF2750)) {
2058 spec->supported_bands |= SUPPORT_BAND_5GHZ; 2094 spec->supported_bands |= SUPPORT_BAND_5GHZ;
2059 spec->num_channels = ARRAY_SIZE(rf_vals); 2095 spec->num_channels = ARRAY_SIZE(rf_vals);
2060 spec->channels = rf_vals; 2096 spec->channels = rf_vals;
2061 } else if (rt2x00_rf(chip, RF3020) || 2097 } else if (rt2x00_rf(rt2x00dev, RF3020) ||
2062 rt2x00_rf(chip, RF2020) || 2098 rt2x00_rf(rt2x00dev, RF2020) ||
2063 rt2x00_rf(chip, RF3021) || 2099 rt2x00_rf(rt2x00dev, RF3021) ||
2064 rt2x00_rf(chip, RF3022)) { 2100 rt2x00_rf(rt2x00dev, RF3022)) {
2065 spec->num_channels = ARRAY_SIZE(rf_vals_302x); 2101 spec->num_channels = ARRAY_SIZE(rf_vals_302x);
2066 spec->channels = rf_vals_302x; 2102 spec->channels = rf_vals_302x;
2067 } 2103 }
@@ -2069,7 +2105,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2069 /* 2105 /*
2070 * Initialize HT information. 2106 * Initialize HT information.
2071 */ 2107 */
2072 if (!rt2x00_rf(chip, RF2020)) 2108 if (!rt2x00_rf(rt2x00dev, RF2020))
2073 spec->ht.ht_supported = true; 2109 spec->ht.ht_supported = true;
2074 else 2110 else
2075 spec->ht.ht_supported = false; 2111 spec->ht.ht_supported = false;
@@ -2282,7 +2318,6 @@ const struct ieee80211_ops rt2800_mac80211_ops = {
2282 .set_rts_threshold = rt2800_set_rts_threshold, 2318 .set_rts_threshold = rt2800_set_rts_threshold,
2283 .bss_info_changed = rt2x00mac_bss_info_changed, 2319 .bss_info_changed = rt2x00mac_bss_info_changed,
2284 .conf_tx = rt2800_conf_tx, 2320 .conf_tx = rt2800_conf_tx,
2285 .get_tx_stats = rt2x00mac_get_tx_stats,
2286 .get_tsf = rt2800_get_tsf, 2321 .get_tsf = rt2800_get_tsf,
2287 .rfkill_poll = rt2x00mac_rfkill_poll, 2322 .rfkill_poll = rt2x00mac_rfkill_poll,
2288}; 2323};
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 535ce22f2ac8..ebabeae62d1b 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -114,8 +114,6 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
114extern const struct rt2x00debug rt2800_rt2x00debug; 114extern const struct rt2x00debug rt2800_rt2x00debug;
115 115
116int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev); 116int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev);
117void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
118 struct rt2x00_led *led, enum led_type type);
119int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev, 117int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
120 struct rt2x00lib_crypto *crypto, 118 struct rt2x00lib_crypto *crypto,
121 struct ieee80211_key_conf *key); 119 struct ieee80211_key_conf *key);
@@ -139,6 +137,7 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
139int rt2800_init_registers(struct rt2x00_dev *rt2x00dev); 137int rt2800_init_registers(struct rt2x00_dev *rt2x00dev);
140int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev); 138int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev);
141int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev); 139int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev);
140int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev);
142 141
143int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev); 142int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev);
144void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev); 143void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index dfc886fcb44d..aca8c124f434 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -48,14 +48,6 @@
48#include "rt2800.h" 48#include "rt2800.h"
49#include "rt2800pci.h" 49#include "rt2800pci.h"
50 50
51#ifdef CONFIG_RT2800PCI_PCI_MODULE
52#define CONFIG_RT2800PCI_PCI
53#endif
54
55#ifdef CONFIG_RT2800PCI_WISOC_MODULE
56#define CONFIG_RT2800PCI_WISOC
57#endif
58
59/* 51/*
60 * Allow hardware encryption to be disabled. 52 * Allow hardware encryption to be disabled.
61 */ 53 */
@@ -87,7 +79,7 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
87 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); 79 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
88} 80}
89 81
90#ifdef CONFIG_RT2800PCI_WISOC 82#ifdef CONFIG_RT2800PCI_SOC
91static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) 83static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
92{ 84{
93 u32 *base_addr = (u32 *) KSEG1ADDR(0x1F040000); /* XXX for RT3052 */ 85 u32 *base_addr = (u32 *) KSEG1ADDR(0x1F040000); /* XXX for RT3052 */
@@ -98,7 +90,7 @@ static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
98static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) 90static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
99{ 91{
100} 92}
101#endif /* CONFIG_RT2800PCI_WISOC */ 93#endif /* CONFIG_RT2800PCI_SOC */
102 94
103#ifdef CONFIG_RT2800PCI_PCI 95#ifdef CONFIG_RT2800PCI_PCI
104static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom) 96static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
@@ -461,24 +453,6 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
461 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg); 453 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
462} 454}
463 455
464static int rt2800pci_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
465{
466 unsigned int i;
467 u32 reg;
468
469 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
470 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
471 if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
472 !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
473 return 0;
474
475 msleep(1);
476 }
477
478 ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
479 return -EACCES;
480}
481
482static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev) 456static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
483{ 457{
484 u32 reg; 458 u32 reg;
@@ -487,10 +461,10 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
487 /* 461 /*
488 * Initialize all registers. 462 * Initialize all registers.
489 */ 463 */
490 if (unlikely(rt2800pci_wait_wpdma_ready(rt2x00dev) || 464 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
491 rt2800pci_init_queues(rt2x00dev) || 465 rt2800pci_init_queues(rt2x00dev) ||
492 rt2800_init_registers(rt2x00dev) || 466 rt2800_init_registers(rt2x00dev) ||
493 rt2800pci_wait_wpdma_ready(rt2x00dev) || 467 rt2800_wait_wpdma_ready(rt2x00dev) ||
494 rt2800_init_bbp(rt2x00dev) || 468 rt2800_init_bbp(rt2x00dev) ||
495 rt2800_init_rfcsr(rt2x00dev))) 469 rt2800_init_rfcsr(rt2x00dev)))
496 return -EIO; 470 return -EIO;
@@ -570,7 +544,7 @@ static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
570 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); 544 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
571 545
572 /* Wait for DMA, ignore error */ 546 /* Wait for DMA, ignore error */
573 rt2800pci_wait_wpdma_ready(rt2x00dev); 547 rt2800_wait_wpdma_ready(rt2x00dev);
574} 548}
575 549
576static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev, 550static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
@@ -835,7 +809,6 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
835 struct rxdone_entry_desc *rxdesc) 809 struct rxdone_entry_desc *rxdesc)
836{ 810{
837 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 811 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
838 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
839 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 812 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
840 __le32 *rxd = entry_priv->desc; 813 __le32 *rxd = entry_priv->desc;
841 __le32 *rxwi = (__le32 *)entry->skb->data; 814 __le32 *rxwi = (__le32 *)entry->skb->data;
@@ -883,10 +856,8 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
883 if (rt2x00_get_field32(rxd3, RXD_W3_MY_BSS)) 856 if (rt2x00_get_field32(rxd3, RXD_W3_MY_BSS))
884 rxdesc->dev_flags |= RXDONE_MY_BSS; 857 rxdesc->dev_flags |= RXDONE_MY_BSS;
885 858
886 if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD)) { 859 if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD))
887 rxdesc->dev_flags |= RXDONE_L2PAD; 860 rxdesc->dev_flags |= RXDONE_L2PAD;
888 skbdesc->flags |= SKBDESC_L2_PADDED;
889 }
890 861
891 if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI)) 862 if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
892 rxdesc->flags |= RX_FLAG_SHORT_GI; 863 rxdesc->flags |= RX_FLAG_SHORT_GI;
@@ -927,7 +898,6 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
927 * Remove TXWI descriptor from start of buffer. 898 * Remove TXWI descriptor from start of buffer.
928 */ 899 */
929 skb_pull(entry->skb, RXWI_DESC_SIZE); 900 skb_pull(entry->skb, RXWI_DESC_SIZE);
930 skb_trim(entry->skb, rxdesc->size);
931} 901}
932 902
933/* 903/*
@@ -1071,18 +1041,12 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1071 /* 1041 /*
1072 * Read EEPROM into buffer 1042 * Read EEPROM into buffer
1073 */ 1043 */
1074 switch (rt2x00dev->chip.rt) { 1044 if (rt2x00_is_soc(rt2x00dev))
1075 case RT2880:
1076 case RT3052:
1077 rt2800pci_read_eeprom_soc(rt2x00dev); 1045 rt2800pci_read_eeprom_soc(rt2x00dev);
1078 break; 1046 else if (rt2800pci_efuse_detect(rt2x00dev))
1079 default: 1047 rt2800pci_read_eeprom_efuse(rt2x00dev);
1080 if (rt2800pci_efuse_detect(rt2x00dev)) 1048 else
1081 rt2800pci_read_eeprom_efuse(rt2x00dev); 1049 rt2800pci_read_eeprom_pci(rt2x00dev);
1082 else
1083 rt2800pci_read_eeprom_pci(rt2x00dev);
1084 break;
1085 }
1086 1050
1087 return rt2800_validate_eeprom(rt2x00dev); 1051 return rt2800_validate_eeprom(rt2x00dev);
1088} 1052}
@@ -1133,8 +1097,7 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1133 /* 1097 /*
1134 * This device requires firmware. 1098 * This device requires firmware.
1135 */ 1099 */
1136 if (!rt2x00_rt(&rt2x00dev->chip, RT2880) && 1100 if (!rt2x00_is_soc(rt2x00dev))
1137 !rt2x00_rt(&rt2x00dev->chip, RT3052))
1138 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); 1101 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
1139 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); 1102 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
1140 __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags); 1103 __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags);
@@ -1221,8 +1184,11 @@ static const struct rt2x00_ops rt2800pci_ops = {
1221/* 1184/*
1222 * RT2800pci module information. 1185 * RT2800pci module information.
1223 */ 1186 */
1224static struct pci_device_id rt2800pci_device_table[] = { 1187static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1225 { PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1188 { PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) },
1189 { PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) },
1190 { PCI_DEVICE(0x1814, 0x0701), PCI_DEVICE_DATA(&rt2800pci_ops) },
1191 { PCI_DEVICE(0x1814, 0x0781), PCI_DEVICE_DATA(&rt2800pci_ops) },
1226 { PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1192 { PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) },
1227 { PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1193 { PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) },
1228 { PCI_DEVICE(0x1432, 0x7728), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1194 { PCI_DEVICE(0x1432, 0x7728), PCI_DEVICE_DATA(&rt2800pci_ops) },
@@ -1230,18 +1196,19 @@ static struct pci_device_id rt2800pci_device_table[] = {
1230 { PCI_DEVICE(0x1432, 0x7748), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1196 { PCI_DEVICE(0x1432, 0x7748), PCI_DEVICE_DATA(&rt2800pci_ops) },
1231 { PCI_DEVICE(0x1432, 0x7758), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1197 { PCI_DEVICE(0x1432, 0x7758), PCI_DEVICE_DATA(&rt2800pci_ops) },
1232 { PCI_DEVICE(0x1432, 0x7768), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1198 { PCI_DEVICE(0x1432, 0x7768), PCI_DEVICE_DATA(&rt2800pci_ops) },
1233 { PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1199 { PCI_DEVICE(0x1a3b, 0x1059), PCI_DEVICE_DATA(&rt2800pci_ops) },
1234 { PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1200#ifdef CONFIG_RT2800PCI_RT30XX
1235 { PCI_DEVICE(0x1814, 0x0701), PCI_DEVICE_DATA(&rt2800pci_ops) },
1236 { PCI_DEVICE(0x1814, 0x0781), PCI_DEVICE_DATA(&rt2800pci_ops) },
1237 { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
1238 { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
1239 { PCI_DEVICE(0x1814, 0x3090), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1201 { PCI_DEVICE(0x1814, 0x3090), PCI_DEVICE_DATA(&rt2800pci_ops) },
1240 { PCI_DEVICE(0x1814, 0x3091), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1202 { PCI_DEVICE(0x1814, 0x3091), PCI_DEVICE_DATA(&rt2800pci_ops) },
1241 { PCI_DEVICE(0x1814, 0x3092), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1203 { PCI_DEVICE(0x1814, 0x3092), PCI_DEVICE_DATA(&rt2800pci_ops) },
1204 { PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) },
1205#endif
1206#ifdef CONFIG_RT2800PCI_RT35XX
1207 { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
1208 { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
1242 { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1209 { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
1243 { PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1210 { PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) },
1244 { PCI_DEVICE(0x1a3b, 0x1059), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1211#endif
1245 { 0, } 1212 { 0, }
1246}; 1213};
1247 1214
@@ -1255,12 +1222,11 @@ MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
1255#endif /* CONFIG_RT2800PCI_PCI */ 1222#endif /* CONFIG_RT2800PCI_PCI */
1256MODULE_LICENSE("GPL"); 1223MODULE_LICENSE("GPL");
1257 1224
1258#ifdef CONFIG_RT2800PCI_WISOC 1225#ifdef CONFIG_RT2800PCI_SOC
1259#if defined(CONFIG_RALINK_RT288X) 1226static int rt2800soc_probe(struct platform_device *pdev)
1260__rt2x00soc_probe(RT2880, &rt2800pci_ops); 1227{
1261#elif defined(CONFIG_RALINK_RT305X) 1228 return rt2x00soc_probe(pdev, rt2800pci_ops);
1262__rt2x00soc_probe(RT3052, &rt2800pci_ops); 1229}
1263#endif
1264 1230
1265static struct platform_driver rt2800soc_driver = { 1231static struct platform_driver rt2800soc_driver = {
1266 .driver = { 1232 .driver = {
@@ -1268,12 +1234,12 @@ static struct platform_driver rt2800soc_driver = {
1268 .owner = THIS_MODULE, 1234 .owner = THIS_MODULE,
1269 .mod_name = KBUILD_MODNAME, 1235 .mod_name = KBUILD_MODNAME,
1270 }, 1236 },
1271 .probe = __rt2x00soc_probe, 1237 .probe = rt2800soc_probe,
1272 .remove = __devexit_p(rt2x00soc_remove), 1238 .remove = __devexit_p(rt2x00soc_remove),
1273 .suspend = rt2x00soc_suspend, 1239 .suspend = rt2x00soc_suspend,
1274 .resume = rt2x00soc_resume, 1240 .resume = rt2x00soc_resume,
1275}; 1241};
1276#endif /* CONFIG_RT2800PCI_WISOC */ 1242#endif /* CONFIG_RT2800PCI_SOC */
1277 1243
1278#ifdef CONFIG_RT2800PCI_PCI 1244#ifdef CONFIG_RT2800PCI_PCI
1279static struct pci_driver rt2800pci_driver = { 1245static struct pci_driver rt2800pci_driver = {
@@ -1290,7 +1256,7 @@ static int __init rt2800pci_init(void)
1290{ 1256{
1291 int ret = 0; 1257 int ret = 0;
1292 1258
1293#ifdef CONFIG_RT2800PCI_WISOC 1259#ifdef CONFIG_RT2800PCI_SOC
1294 ret = platform_driver_register(&rt2800soc_driver); 1260 ret = platform_driver_register(&rt2800soc_driver);
1295 if (ret) 1261 if (ret)
1296 return ret; 1262 return ret;
@@ -1298,7 +1264,7 @@ static int __init rt2800pci_init(void)
1298#ifdef CONFIG_RT2800PCI_PCI 1264#ifdef CONFIG_RT2800PCI_PCI
1299 ret = pci_register_driver(&rt2800pci_driver); 1265 ret = pci_register_driver(&rt2800pci_driver);
1300 if (ret) { 1266 if (ret) {
1301#ifdef CONFIG_RT2800PCI_WISOC 1267#ifdef CONFIG_RT2800PCI_SOC
1302 platform_driver_unregister(&rt2800soc_driver); 1268 platform_driver_unregister(&rt2800soc_driver);
1303#endif 1269#endif
1304 return ret; 1270 return ret;
@@ -1313,7 +1279,7 @@ static void __exit rt2800pci_exit(void)
1313#ifdef CONFIG_RT2800PCI_PCI 1279#ifdef CONFIG_RT2800PCI_PCI
1314 pci_unregister_driver(&rt2800pci_driver); 1280 pci_unregister_driver(&rt2800pci_driver);
1315#endif 1281#endif
1316#ifdef CONFIG_RT2800PCI_WISOC 1282#ifdef CONFIG_RT2800PCI_SOC
1317 platform_driver_unregister(&rt2800soc_driver); 1283 platform_driver_unregister(&rt2800soc_driver);
1318#endif 1284#endif
1319} 1285}
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index ab95346cf6a3..5e4ee2023fcf 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -92,7 +92,6 @@ static bool rt2800usb_check_crc(const u8 *data, const size_t len)
92static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev, 92static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev,
93 const u8 *data, const size_t len) 93 const u8 *data, const size_t len)
94{ 94{
95 u16 chipset = (rt2x00_rev(&rt2x00dev->chip) >> 16) & 0xffff;
96 size_t offset = 0; 95 size_t offset = 0;
97 96
98 /* 97 /*
@@ -111,9 +110,9 @@ static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev,
111 * Check if we need the upper 4kb firmware data or not. 110 * Check if we need the upper 4kb firmware data or not.
112 */ 111 */
113 if ((len == 4096) && 112 if ((len == 4096) &&
114 (chipset != 0x2860) && 113 !rt2x00_rt(rt2x00dev, RT2860) &&
115 (chipset != 0x2872) && 114 !rt2x00_rt(rt2x00dev, RT2872) &&
116 (chipset != 0x3070)) 115 !rt2x00_rt(rt2x00dev, RT3070))
117 return FW_BAD_VERSION; 116 return FW_BAD_VERSION;
118 117
119 /* 118 /*
@@ -138,14 +137,13 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
138 u32 reg; 137 u32 reg;
139 u32 offset; 138 u32 offset;
140 u32 length; 139 u32 length;
141 u16 chipset = (rt2x00_rev(&rt2x00dev->chip) >> 16) & 0xffff;
142 140
143 /* 141 /*
144 * Check which section of the firmware we need. 142 * Check which section of the firmware we need.
145 */ 143 */
146 if ((chipset == 0x2860) || 144 if (rt2x00_rt(rt2x00dev, RT2860) ||
147 (chipset == 0x2872) || 145 rt2x00_rt(rt2x00dev, RT2872) ||
148 (chipset == 0x3070)) { 146 rt2x00_rt(rt2x00dev, RT3070)) {
149 offset = 0; 147 offset = 0;
150 length = 4096; 148 length = 4096;
151 } else { 149 } else {
@@ -200,9 +198,9 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
200 */ 198 */
201 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0); 199 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0);
202 200
203 if ((chipset == 0x3070) || 201 if (rt2x00_rt(rt2x00dev, RT3070) ||
204 (chipset == 0x3071) || 202 rt2x00_rt(rt2x00dev, RT3071) ||
205 (chipset == 0x3572)) { 203 rt2x00_rt(rt2x00dev, RT3572)) {
206 udelay(200); 204 udelay(200);
207 rt2800_mcu_request(rt2x00dev, MCU_CURRENT, 0, 0, 0); 205 rt2800_mcu_request(rt2x00dev, MCU_CURRENT, 0, 0, 0);
208 udelay(10); 206 udelay(10);
@@ -248,24 +246,6 @@ static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
248 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 246 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
249} 247}
250 248
251static int rt2800usb_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
252{
253 unsigned int i;
254 u32 reg;
255
256 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
257 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
258 if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
259 !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
260 return 0;
261
262 msleep(1);
263 }
264
265 ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
266 return -EACCES;
267}
268
269static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev) 249static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
270{ 250{
271 u32 reg; 251 u32 reg;
@@ -274,7 +254,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
274 /* 254 /*
275 * Initialize all registers. 255 * Initialize all registers.
276 */ 256 */
277 if (unlikely(rt2800usb_wait_wpdma_ready(rt2x00dev) || 257 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
278 rt2800_init_registers(rt2x00dev) || 258 rt2800_init_registers(rt2x00dev) ||
279 rt2800_init_bbp(rt2x00dev) || 259 rt2800_init_bbp(rt2x00dev) ||
280 rt2800_init_rfcsr(rt2x00dev))) 260 rt2800_init_rfcsr(rt2x00dev)))
@@ -295,9 +275,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
295 275
296 rt2800_register_read(rt2x00dev, USB_DMA_CFG, &reg); 276 rt2800_register_read(rt2x00dev, USB_DMA_CFG, &reg);
297 rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0); 277 rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0);
298 /* Don't use bulk in aggregation when working with USB 1.1 */ 278 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN, 0);
299 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN,
300 (rt2x00dev->rx->usb_maxpacket == 512));
301 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_TIMEOUT, 128); 279 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_TIMEOUT, 128);
302 /* 280 /*
303 * Total room for RX frames in kilobytes, PBF might still exceed 281 * Total room for RX frames in kilobytes, PBF might still exceed
@@ -346,7 +324,7 @@ static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev)
346 rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0); 324 rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
347 325
348 /* Wait for DMA, ignore error */ 326 /* Wait for DMA, ignore error */
349 rt2800usb_wait_wpdma_ready(rt2x00dev); 327 rt2800_wait_wpdma_ready(rt2x00dev);
350 328
351 rt2x00usb_disable_radio(rt2x00dev); 329 rt2x00usb_disable_radio(rt2x00dev);
352} 330}
@@ -573,41 +551,57 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
573{ 551{
574 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 552 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
575 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 553 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
576 __le32 *rxd = (__le32 *)entry->skb->data; 554 __le32 *rxi = (__le32 *)entry->skb->data;
577 __le32 *rxwi; 555 __le32 *rxwi;
578 u32 rxd0; 556 __le32 *rxd;
557 u32 rxi0;
579 u32 rxwi0; 558 u32 rxwi0;
580 u32 rxwi1; 559 u32 rxwi1;
581 u32 rxwi2; 560 u32 rxwi2;
582 u32 rxwi3; 561 u32 rxwi3;
562 u32 rxd0;
563 int rx_pkt_len;
564
565 /*
566 * RX frame format is :
567 * | RXINFO | RXWI | header | L2 pad | payload | pad | RXD | USB pad |
568 * |<------------ rx_pkt_len -------------->|
569 */
570 rt2x00_desc_read(rxi, 0, &rxi0);
571 rx_pkt_len = rt2x00_get_field32(rxi0, RXINFO_W0_USB_DMA_RX_PKT_LEN);
572
573 rxwi = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE);
574
575 /*
576 * FIXME : we need to check for rx_pkt_len validity
577 */
578 rxd = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE + rx_pkt_len);
583 579
584 /* 580 /*
585 * Copy descriptor to the skbdesc->desc buffer, making it safe from 581 * Copy descriptor to the skbdesc->desc buffer, making it safe from
586 * moving of frame data in rt2x00usb. 582 * moving of frame data in rt2x00usb.
587 */ 583 */
588 memcpy(skbdesc->desc, rxd, skbdesc->desc_len); 584 memcpy(skbdesc->desc, rxi, skbdesc->desc_len);
589 rxd = (__le32 *)skbdesc->desc;
590 rxwi = &rxd[RXINFO_DESC_SIZE / sizeof(__le32)];
591 585
592 /* 586 /*
593 * It is now safe to read the descriptor on all architectures. 587 * It is now safe to read the descriptor on all architectures.
594 */ 588 */
595 rt2x00_desc_read(rxd, 0, &rxd0);
596 rt2x00_desc_read(rxwi, 0, &rxwi0); 589 rt2x00_desc_read(rxwi, 0, &rxwi0);
597 rt2x00_desc_read(rxwi, 1, &rxwi1); 590 rt2x00_desc_read(rxwi, 1, &rxwi1);
598 rt2x00_desc_read(rxwi, 2, &rxwi2); 591 rt2x00_desc_read(rxwi, 2, &rxwi2);
599 rt2x00_desc_read(rxwi, 3, &rxwi3); 592 rt2x00_desc_read(rxwi, 3, &rxwi3);
593 rt2x00_desc_read(rxd, 0, &rxd0);
600 594
601 if (rt2x00_get_field32(rxd0, RXINFO_W0_CRC_ERROR)) 595 if (rt2x00_get_field32(rxd0, RXD_W0_CRC_ERROR))
602 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 596 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
603 597
604 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) { 598 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
605 rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF); 599 rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
606 rxdesc->cipher_status = 600 rxdesc->cipher_status =
607 rt2x00_get_field32(rxd0, RXINFO_W0_CIPHER_ERROR); 601 rt2x00_get_field32(rxd0, RXD_W0_CIPHER_ERROR);
608 } 602 }
609 603
610 if (rt2x00_get_field32(rxd0, RXINFO_W0_DECRYPTED)) { 604 if (rt2x00_get_field32(rxd0, RXD_W0_DECRYPTED)) {
611 /* 605 /*
612 * Hardware has stripped IV/EIV data from 802.11 frame during 606 * Hardware has stripped IV/EIV data from 802.11 frame during
613 * decryption. Unfortunately the descriptor doesn't contain 607 * decryption. Unfortunately the descriptor doesn't contain
@@ -622,13 +616,11 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
622 rxdesc->flags |= RX_FLAG_MMIC_ERROR; 616 rxdesc->flags |= RX_FLAG_MMIC_ERROR;
623 } 617 }
624 618
625 if (rt2x00_get_field32(rxd0, RXINFO_W0_MY_BSS)) 619 if (rt2x00_get_field32(rxd0, RXD_W0_MY_BSS))
626 rxdesc->dev_flags |= RXDONE_MY_BSS; 620 rxdesc->dev_flags |= RXDONE_MY_BSS;
627 621
628 if (rt2x00_get_field32(rxd0, RXINFO_W0_L2PAD)) { 622 if (rt2x00_get_field32(rxd0, RXD_W0_L2PAD))
629 rxdesc->dev_flags |= RXDONE_L2PAD; 623 rxdesc->dev_flags |= RXDONE_L2PAD;
630 skbdesc->flags |= SKBDESC_L2_PADDED;
631 }
632 624
633 if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI)) 625 if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
634 rxdesc->flags |= RX_FLAG_SHORT_GI; 626 rxdesc->flags |= RX_FLAG_SHORT_GI;
@@ -663,7 +655,6 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
663 * Remove RXWI descriptor from start of buffer. 655 * Remove RXWI descriptor from start of buffer.
664 */ 656 */
665 skb_pull(entry->skb, skbdesc->desc_len); 657 skb_pull(entry->skb, skbdesc->desc_len);
666 skb_trim(entry->skb, rxdesc->size);
667} 658}
668 659
669/* 660/*
@@ -814,51 +805,27 @@ static struct usb_device_id rt2800usb_device_table[] = {
814 /* Abocom */ 805 /* Abocom */
815 { USB_DEVICE(0x07b8, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) }, 806 { USB_DEVICE(0x07b8, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
816 { USB_DEVICE(0x07b8, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) }, 807 { USB_DEVICE(0x07b8, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
817 { USB_DEVICE(0x07b8, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
818 { USB_DEVICE(0x07b8, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
819 { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
820 { USB_DEVICE(0x1482, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) }, 808 { USB_DEVICE(0x1482, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
821 /* AirTies */
822 { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) },
823 /* Amigo */
824 { USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
825 { USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) },
826 /* Amit */ 809 /* Amit */
827 { USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) }, 810 { USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
828 /* Askey */ 811 /* Askey */
829 { USB_DEVICE(0x1690, 0x0740), USB_DEVICE_DATA(&rt2800usb_ops) }, 812 { USB_DEVICE(0x1690, 0x0740), USB_DEVICE_DATA(&rt2800usb_ops) },
830 { USB_DEVICE(0x1690, 0x0744), USB_DEVICE_DATA(&rt2800usb_ops) },
831 { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) },
832 /* ASUS */ 813 /* ASUS */
833 { USB_DEVICE(0x0b05, 0x1731), USB_DEVICE_DATA(&rt2800usb_ops) }, 814 { USB_DEVICE(0x0b05, 0x1731), USB_DEVICE_DATA(&rt2800usb_ops) },
834 { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) }, 815 { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) },
835 { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) }, 816 { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) },
836 { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) },
837 { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) },
838 { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
839 /* AzureWave */ 817 /* AzureWave */
840 { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) }, 818 { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) },
841 { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) },
842 { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
843 { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) },
844 { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
845 /* Belkin */ 819 /* Belkin */
846 { USB_DEVICE(0x050d, 0x8053), USB_DEVICE_DATA(&rt2800usb_ops) }, 820 { USB_DEVICE(0x050d, 0x8053), USB_DEVICE_DATA(&rt2800usb_ops) },
847 { USB_DEVICE(0x050d, 0x805c), USB_DEVICE_DATA(&rt2800usb_ops) }, 821 { USB_DEVICE(0x050d, 0x805c), USB_DEVICE_DATA(&rt2800usb_ops) },
848 { USB_DEVICE(0x050d, 0x815c), USB_DEVICE_DATA(&rt2800usb_ops) }, 822 { USB_DEVICE(0x050d, 0x815c), USB_DEVICE_DATA(&rt2800usb_ops) },
849 { USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) },
850 /* Buffalo */ 823 /* Buffalo */
851 { USB_DEVICE(0x0411, 0x00e8), USB_DEVICE_DATA(&rt2800usb_ops) }, 824 { USB_DEVICE(0x0411, 0x00e8), USB_DEVICE_DATA(&rt2800usb_ops) },
852 { USB_DEVICE(0x0411, 0x012e), USB_DEVICE_DATA(&rt2800usb_ops) },
853 /* Cisco */
854 { USB_DEVICE(0x167b, 0x4001), USB_DEVICE_DATA(&rt2800usb_ops) },
855 /* Conceptronic */ 825 /* Conceptronic */
856 { USB_DEVICE(0x14b2, 0x3c06), USB_DEVICE_DATA(&rt2800usb_ops) }, 826 { USB_DEVICE(0x14b2, 0x3c06), USB_DEVICE_DATA(&rt2800usb_ops) },
857 { USB_DEVICE(0x14b2, 0x3c07), USB_DEVICE_DATA(&rt2800usb_ops) }, 827 { USB_DEVICE(0x14b2, 0x3c07), USB_DEVICE_DATA(&rt2800usb_ops) },
858 { USB_DEVICE(0x14b2, 0x3c08), USB_DEVICE_DATA(&rt2800usb_ops) },
859 { USB_DEVICE(0x14b2, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) }, 828 { USB_DEVICE(0x14b2, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
860 { USB_DEVICE(0x14b2, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
861 { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) },
862 { USB_DEVICE(0x14b2, 0x3c23), USB_DEVICE_DATA(&rt2800usb_ops) }, 829 { USB_DEVICE(0x14b2, 0x3c23), USB_DEVICE_DATA(&rt2800usb_ops) },
863 { USB_DEVICE(0x14b2, 0x3c25), USB_DEVICE_DATA(&rt2800usb_ops) }, 830 { USB_DEVICE(0x14b2, 0x3c25), USB_DEVICE_DATA(&rt2800usb_ops) },
864 { USB_DEVICE(0x14b2, 0x3c27), USB_DEVICE_DATA(&rt2800usb_ops) }, 831 { USB_DEVICE(0x14b2, 0x3c27), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -867,157 +834,257 @@ static struct usb_device_id rt2800usb_device_table[] = {
867 { USB_DEVICE(0x07aa, 0x002f), USB_DEVICE_DATA(&rt2800usb_ops) }, 834 { USB_DEVICE(0x07aa, 0x002f), USB_DEVICE_DATA(&rt2800usb_ops) },
868 { USB_DEVICE(0x07aa, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) }, 835 { USB_DEVICE(0x07aa, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) },
869 { USB_DEVICE(0x07aa, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) }, 836 { USB_DEVICE(0x07aa, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
870 { USB_DEVICE(0x07aa, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
871 { USB_DEVICE(0x07aa, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
872 { USB_DEVICE(0x18c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
873 { USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) },
874 /* D-Link */ 837 /* D-Link */
875 { USB_DEVICE(0x07d1, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) }, 838 { USB_DEVICE(0x07d1, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
839 { USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
840 /* Edimax */
841 { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) },
842 { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) },
843 /* EnGenius */
844 { USB_DEVICE(0X1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
845 { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) },
846 /* Gigabyte */
847 { USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) },
848 /* Hawking */
849 { USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) },
850 { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) },
851 /* Linksys */
852 { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
853 { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
854 /* Logitec */
855 { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) },
856 { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) },
857 { USB_DEVICE(0x0789, 0x0164), USB_DEVICE_DATA(&rt2800usb_ops) },
858 /* Motorola */
859 { USB_DEVICE(0x100d, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
860 /* MSI */
861 { USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) },
862 /* Philips */
863 { USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) },
864 /* Planex */
865 { USB_DEVICE(0x2019, 0xed06), USB_DEVICE_DATA(&rt2800usb_ops) },
866 /* Ralink */
867 { USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
868 { USB_DEVICE(0x148f, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
869 /* Samsung */
870 { USB_DEVICE(0x04e8, 0x2018), USB_DEVICE_DATA(&rt2800usb_ops) },
871 /* Siemens */
872 { USB_DEVICE(0x129b, 0x1828), USB_DEVICE_DATA(&rt2800usb_ops) },
873 /* Sitecom */
874 { USB_DEVICE(0x0df6, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) },
875 { USB_DEVICE(0x0df6, 0x002b), USB_DEVICE_DATA(&rt2800usb_ops) },
876 { USB_DEVICE(0x0df6, 0x002c), USB_DEVICE_DATA(&rt2800usb_ops) },
877 { USB_DEVICE(0x0df6, 0x002d), USB_DEVICE_DATA(&rt2800usb_ops) },
878 { USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) },
879 { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
880 /* SMC */
881 { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) },
882 { USB_DEVICE(0x083a, 0x7512), USB_DEVICE_DATA(&rt2800usb_ops) },
883 { USB_DEVICE(0x083a, 0x7522), USB_DEVICE_DATA(&rt2800usb_ops) },
884 { USB_DEVICE(0x083a, 0x8522), USB_DEVICE_DATA(&rt2800usb_ops) },
885 { USB_DEVICE(0x083a, 0xa618), USB_DEVICE_DATA(&rt2800usb_ops) },
886 { USB_DEVICE(0x083a, 0xb522), USB_DEVICE_DATA(&rt2800usb_ops) },
887 /* Sparklan */
888 { USB_DEVICE(0x15a9, 0x0006), USB_DEVICE_DATA(&rt2800usb_ops) },
889 /* Sweex */
890 { USB_DEVICE(0x177f, 0x0302), USB_DEVICE_DATA(&rt2800usb_ops) },
891 /* U-Media*/
892 { USB_DEVICE(0x157e, 0x300e), USB_DEVICE_DATA(&rt2800usb_ops) },
893 /* ZCOM */
894 { USB_DEVICE(0x0cde, 0x0022), USB_DEVICE_DATA(&rt2800usb_ops) },
895 { USB_DEVICE(0x0cde, 0x0025), USB_DEVICE_DATA(&rt2800usb_ops) },
896 /* Zinwell */
897 { USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) },
898 { USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) },
899 /* Zyxel */
900 { USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) },
901#ifdef CONFIG_RT2800USB_RT30XX
902 /* Abocom */
903 { USB_DEVICE(0x07b8, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
904 { USB_DEVICE(0x07b8, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
905 { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
906 /* AirTies */
907 { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) },
908 /* AzureWave */
909 { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
910 /* Conceptronic */
911 { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) },
912 /* Corega */
913 { USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) },
914 /* D-Link */
876 { USB_DEVICE(0x07d1, 0x3c0a), USB_DEVICE_DATA(&rt2800usb_ops) }, 915 { USB_DEVICE(0x07d1, 0x3c0a), USB_DEVICE_DATA(&rt2800usb_ops) },
877 { USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) },
878 { USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) }, 916 { USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) },
879 { USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) }, 917 { USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) },
880 { USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) }, 918 { USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) },
881 { USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
882 { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
883 { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) },
884 /* Edimax */ 919 /* Edimax */
885 { USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) }, 920 { USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) },
886 { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) },
887 { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) },
888 /* Encore */ 921 /* Encore */
889 { USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) }, 922 { USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) },
890 { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) },
891 { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
892 /* EnGenius */ 923 /* EnGenius */
893 { USB_DEVICE(0X1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
894 { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) },
895 { USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) }, 924 { USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) },
896 { USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) }, 925 { USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) },
897 { USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) }, 926 { USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) },
927 /* Gigabyte */
928 { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) },
929 /* I-O DATA */
930 { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
931 /* MSI */
932 { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
933 /* Pegatron */
934 { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
935 { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
936 /* Planex */
937 { USB_DEVICE(0x2019, 0xab25), USB_DEVICE_DATA(&rt2800usb_ops) },
938 /* Quanta */
939 { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) },
940 /* Ralink */
941 { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
942 { USB_DEVICE(0x148f, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
943 { USB_DEVICE(0x148f, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
944 { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
945 /* Sitecom */
946 { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) },
947 { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
948 /* SMC */
949 { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
950 /* Zinwell */
951 { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
952 { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
953#endif
954#ifdef CONFIG_RT2800USB_RT35XX
955 /* Askey */
956 { USB_DEVICE(0x1690, 0x0744), USB_DEVICE_DATA(&rt2800usb_ops) },
957 /* Cisco */
958 { USB_DEVICE(0x167b, 0x4001), USB_DEVICE_DATA(&rt2800usb_ops) },
959 /* EnGenius */
960 { USB_DEVICE(0x1740, 0x9801), USB_DEVICE_DATA(&rt2800usb_ops) },
961 /* I-O DATA */
962 { USB_DEVICE(0x04bb, 0x0944), USB_DEVICE_DATA(&rt2800usb_ops) },
963 /* Ralink */
964 { USB_DEVICE(0x148f, 0x3370), USB_DEVICE_DATA(&rt2800usb_ops) },
965 { USB_DEVICE(0x148f, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
966 { USB_DEVICE(0x148f, 0x8070), USB_DEVICE_DATA(&rt2800usb_ops) },
967 /* Sitecom */
968 { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
969 /* Zinwell */
970 { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) },
971#endif
972#ifdef CONFIG_RT2800USB_UNKNOWN
973 /*
974 * Unclear what kind of devices these are (they aren't supported by the
975 * vendor driver).
976 */
977 /* Allwin */
978 { USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
979 { USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
980 { USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
981 { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
982 { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
983 { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
984 { USB_DEVICE(0x8516, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
985 /* Amigo */
986 { USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
987 { USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) },
988 /* Askey */
989 { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) },
990 /* ASUS */
991 { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) },
992 { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) },
993 { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
994 { USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) },
995 { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) },
996 /* AzureWave */
997 { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) },
998 { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) },
999 { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
1000 /* Belkin */
1001 { USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) },
1002 /* Buffalo */
1003 { USB_DEVICE(0x0411, 0x012e), USB_DEVICE_DATA(&rt2800usb_ops) },
1004 { USB_DEVICE(0x0411, 0x0148), USB_DEVICE_DATA(&rt2800usb_ops) },
1005 { USB_DEVICE(0x0411, 0x0150), USB_DEVICE_DATA(&rt2800usb_ops) },
1006 { USB_DEVICE(0x0411, 0x015d), USB_DEVICE_DATA(&rt2800usb_ops) },
1007 /* Conceptronic */
1008 { USB_DEVICE(0x14b2, 0x3c08), USB_DEVICE_DATA(&rt2800usb_ops) },
1009 { USB_DEVICE(0x14b2, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
1010 /* Corega */
1011 { USB_DEVICE(0x07aa, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
1012 { USB_DEVICE(0x07aa, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
1013 { USB_DEVICE(0x18c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
1014 /* D-Link */
1015 { USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) },
1016 { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
1017 { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) },
1018 { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) },
1019 /* Encore */
1020 { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) },
1021 { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
1022 /* EnGenius */
898 { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) }, 1023 { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
899 { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) }, 1024 { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
900 { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) }, 1025 { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
901 { USB_DEVICE(0x1740, 0x9801), USB_DEVICE_DATA(&rt2800usb_ops) },
902 /* Gemtek */ 1026 /* Gemtek */
903 { USB_DEVICE(0x15a9, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) }, 1027 { USB_DEVICE(0x15a9, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
904 /* Gigabyte */ 1028 /* Gigabyte */
905 { USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) },
906 { USB_DEVICE(0x1044, 0x800c), USB_DEVICE_DATA(&rt2800usb_ops) }, 1029 { USB_DEVICE(0x1044, 0x800c), USB_DEVICE_DATA(&rt2800usb_ops) },
907 { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) },
908 /* Hawking */ 1030 /* Hawking */
909 { USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) },
910 { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) },
911 { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) }, 1031 { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) },
912 { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) }, 1032 { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) },
913 /* I-O DATA */ 1033 /* I-O DATA */
914 { USB_DEVICE(0x04bb, 0x0944), USB_DEVICE_DATA(&rt2800usb_ops) },
915 { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
916 { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) }, 1034 { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
917 { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) }, 1035 { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
918 /* LevelOne */ 1036 /* LevelOne */
919 { USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) }, 1037 { USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) },
920 { USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) }, 1038 { USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) },
921 /* Linksys */ 1039 /* Linksys */
922 { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
923 { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
924 { USB_DEVICE(0x1737, 0x0077), USB_DEVICE_DATA(&rt2800usb_ops) }, 1040 { USB_DEVICE(0x1737, 0x0077), USB_DEVICE_DATA(&rt2800usb_ops) },
1041 { USB_DEVICE(0x1737, 0x0078), USB_DEVICE_DATA(&rt2800usb_ops) },
925 { USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) }, 1042 { USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) },
926 /* Logitec */
927 { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) },
928 { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) },
929 { USB_DEVICE(0x0789, 0x0164), USB_DEVICE_DATA(&rt2800usb_ops) },
930 /* Motorola */ 1043 /* Motorola */
931 { USB_DEVICE(0x100d, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
932 { USB_DEVICE(0x100d, 0x9032), USB_DEVICE_DATA(&rt2800usb_ops) }, 1044 { USB_DEVICE(0x100d, 0x9032), USB_DEVICE_DATA(&rt2800usb_ops) },
933 /* MSI */ 1045 /* MSI */
934 { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
935 { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) }, 1046 { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
1047 { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) },
936 { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) }, 1048 { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
937 { USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) }, 1049 { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) },
938 { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) }, 1050 { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
1051 { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) },
939 { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) }, 1052 { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
1053 { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) },
940 { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) }, 1054 { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
941 /* Ovislink */ 1055 /* Ovislink */
942 { USB_DEVICE(0x1b75, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, 1056 { USB_DEVICE(0x1b75, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
943 /* Para */ 1057 /* Para */
944 { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) }, 1058 { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
945 /* Pegatron */ 1059 /* Pegatron */
1060 { USB_DEVICE(0x05a6, 0x0101), USB_DEVICE_DATA(&rt2800usb_ops) },
946 { USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) }, 1061 { USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) },
947 { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) }, 1062 { USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
948 { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
949 /* Philips */
950 { USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) },
951 /* Planex */ 1063 /* Planex */
952 { USB_DEVICE(0x2019, 0xed06), USB_DEVICE_DATA(&rt2800usb_ops) },
953 { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) }, 1064 { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) },
954 { USB_DEVICE(0x2019, 0xab25), USB_DEVICE_DATA(&rt2800usb_ops) },
955 /* Qcom */ 1065 /* Qcom */
956 { USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) }, 1066 { USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) },
957 /* Quanta */
958 { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) },
959 /* Ralink */
960 { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
961 { USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
962 { USB_DEVICE(0x148f, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
963 { USB_DEVICE(0x148f, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
964 { USB_DEVICE(0x148f, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
965 { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
966 { USB_DEVICE(0x148f, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
967 /* Samsung */
968 { USB_DEVICE(0x04e8, 0x2018), USB_DEVICE_DATA(&rt2800usb_ops) },
969 /* Siemens */
970 { USB_DEVICE(0x129b, 0x1828), USB_DEVICE_DATA(&rt2800usb_ops) },
971 /* Sitecom */ 1067 /* Sitecom */
972 { USB_DEVICE(0x0df6, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) },
973 { USB_DEVICE(0x0df6, 0x002b), USB_DEVICE_DATA(&rt2800usb_ops) },
974 { USB_DEVICE(0x0df6, 0x002c), USB_DEVICE_DATA(&rt2800usb_ops) },
975 { USB_DEVICE(0x0df6, 0x002d), USB_DEVICE_DATA(&rt2800usb_ops) },
976 { USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) },
977 { USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) }, 1068 { USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) },
978 { USB_DEVICE(0x0df6, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) }, 1069 { USB_DEVICE(0x0df6, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) },
979 { USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) }, 1070 { USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) },
980 { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) },
981 { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
982 { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) }, 1071 { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
983 { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
984 { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
985 { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) }, 1072 { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
986 { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) }, 1073 { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
987 { USB_DEVICE(0x0df6, 0x004a), USB_DEVICE_DATA(&rt2800usb_ops) }, 1074 { USB_DEVICE(0x0df6, 0x004a), USB_DEVICE_DATA(&rt2800usb_ops) },
988 { USB_DEVICE(0x0df6, 0x004d), USB_DEVICE_DATA(&rt2800usb_ops) }, 1075 { USB_DEVICE(0x0df6, 0x004d), USB_DEVICE_DATA(&rt2800usb_ops) },
989 /* SMC */ 1076 /* SMC */
990 { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) },
991 { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
992 { USB_DEVICE(0x083a, 0x7512), USB_DEVICE_DATA(&rt2800usb_ops) },
993 { USB_DEVICE(0x083a, 0x7522), USB_DEVICE_DATA(&rt2800usb_ops) },
994 { USB_DEVICE(0x083a, 0x8522), USB_DEVICE_DATA(&rt2800usb_ops) },
995 { USB_DEVICE(0x083a, 0xa512), USB_DEVICE_DATA(&rt2800usb_ops) }, 1077 { USB_DEVICE(0x083a, 0xa512), USB_DEVICE_DATA(&rt2800usb_ops) },
996 { USB_DEVICE(0x083a, 0xa618), USB_DEVICE_DATA(&rt2800usb_ops) },
997 { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) }, 1078 { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
998 { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) }, 1079 { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
999 { USB_DEVICE(0x083a, 0xb522), USB_DEVICE_DATA(&rt2800usb_ops) },
1000 { USB_DEVICE(0x083a, 0xc522), USB_DEVICE_DATA(&rt2800usb_ops) }, 1080 { USB_DEVICE(0x083a, 0xc522), USB_DEVICE_DATA(&rt2800usb_ops) },
1001 /* Sparklan */ 1081 { USB_DEVICE(0x083a, 0xd522), USB_DEVICE_DATA(&rt2800usb_ops) },
1002 { USB_DEVICE(0x15a9, 0x0006), USB_DEVICE_DATA(&rt2800usb_ops) },
1003 /* Sweex */ 1082 /* Sweex */
1004 { USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) }, 1083 { USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) },
1005 { USB_DEVICE(0x177f, 0x0302), USB_DEVICE_DATA(&rt2800usb_ops) },
1006 { USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) }, 1084 { USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) },
1007 /* U-Media*/
1008 { USB_DEVICE(0x157e, 0x300e), USB_DEVICE_DATA(&rt2800usb_ops) },
1009 /* ZCOM */
1010 { USB_DEVICE(0x0cde, 0x0022), USB_DEVICE_DATA(&rt2800usb_ops) },
1011 { USB_DEVICE(0x0cde, 0x0025), USB_DEVICE_DATA(&rt2800usb_ops) },
1012 /* Zinwell */
1013 { USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) },
1014 { USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) },
1015 { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
1016 { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) },
1017 { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
1018 /* Zyxel */ 1085 /* Zyxel */
1019 { USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) },
1020 { USB_DEVICE(0x0586, 0x341a), USB_DEVICE_DATA(&rt2800usb_ops) }, 1086 { USB_DEVICE(0x0586, 0x341a), USB_DEVICE_DATA(&rt2800usb_ops) },
1087#endif
1021 { 0, } 1088 { 0, }
1022}; 1089};
1023 1090
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index 1e4340a182ef..d1d8ae94b4d4 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -79,6 +79,8 @@
79 */ 79 */
80#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) ) 80#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
81#define RXINFO_DESC_SIZE ( 1 * sizeof(__le32) ) 81#define RXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
82#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) )
83#define RXD_DESC_SIZE ( 1 * sizeof(__le32) )
82 84
83/* 85/*
84 * TX Info structure 86 * TX Info structure
@@ -101,6 +103,54 @@
101#define TXINFO_W0_USB_DMA_TX_BURST FIELD32(0x80000000) 103#define TXINFO_W0_USB_DMA_TX_BURST FIELD32(0x80000000)
102 104
103/* 105/*
106 * RX Info structure
107 */
108
109/*
110 * Word 0
111 */
112
113#define RXINFO_W0_USB_DMA_RX_PKT_LEN FIELD32(0x0000ffff)
114
115/*
116 * RX WI structure
117 */
118
119/*
120 * Word0
121 */
122#define RXWI_W0_WIRELESS_CLI_ID FIELD32(0x000000ff)
123#define RXWI_W0_KEY_INDEX FIELD32(0x00000300)
124#define RXWI_W0_BSSID FIELD32(0x00001c00)
125#define RXWI_W0_UDF FIELD32(0x0000e000)
126#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
127#define RXWI_W0_TID FIELD32(0xf0000000)
128
129/*
130 * Word1
131 */
132#define RXWI_W1_FRAG FIELD32(0x0000000f)
133#define RXWI_W1_SEQUENCE FIELD32(0x0000fff0)
134#define RXWI_W1_MCS FIELD32(0x007f0000)
135#define RXWI_W1_BW FIELD32(0x00800000)
136#define RXWI_W1_SHORT_GI FIELD32(0x01000000)
137#define RXWI_W1_STBC FIELD32(0x06000000)
138#define RXWI_W1_PHYMODE FIELD32(0xc0000000)
139
140/*
141 * Word2
142 */
143#define RXWI_W2_RSSI0 FIELD32(0x000000ff)
144#define RXWI_W2_RSSI1 FIELD32(0x0000ff00)
145#define RXWI_W2_RSSI2 FIELD32(0x00ff0000)
146
147/*
148 * Word3
149 */
150#define RXWI_W3_SNR0 FIELD32(0x000000ff)
151#define RXWI_W3_SNR1 FIELD32(0x0000ff00)
152
153/*
104 * RX descriptor format for RX Ring. 154 * RX descriptor format for RX Ring.
105 */ 155 */
106 156
@@ -115,25 +165,25 @@
115 * AMSDU: rx with 802.3 header, not 802.11 header. 165 * AMSDU: rx with 802.3 header, not 802.11 header.
116 */ 166 */
117 167
118#define RXINFO_W0_BA FIELD32(0x00000001) 168#define RXD_W0_BA FIELD32(0x00000001)
119#define RXINFO_W0_DATA FIELD32(0x00000002) 169#define RXD_W0_DATA FIELD32(0x00000002)
120#define RXINFO_W0_NULLDATA FIELD32(0x00000004) 170#define RXD_W0_NULLDATA FIELD32(0x00000004)
121#define RXINFO_W0_FRAG FIELD32(0x00000008) 171#define RXD_W0_FRAG FIELD32(0x00000008)
122#define RXINFO_W0_UNICAST_TO_ME FIELD32(0x00000010) 172#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000010)
123#define RXINFO_W0_MULTICAST FIELD32(0x00000020) 173#define RXD_W0_MULTICAST FIELD32(0x00000020)
124#define RXINFO_W0_BROADCAST FIELD32(0x00000040) 174#define RXD_W0_BROADCAST FIELD32(0x00000040)
125#define RXINFO_W0_MY_BSS FIELD32(0x00000080) 175#define RXD_W0_MY_BSS FIELD32(0x00000080)
126#define RXINFO_W0_CRC_ERROR FIELD32(0x00000100) 176#define RXD_W0_CRC_ERROR FIELD32(0x00000100)
127#define RXINFO_W0_CIPHER_ERROR FIELD32(0x00000600) 177#define RXD_W0_CIPHER_ERROR FIELD32(0x00000600)
128#define RXINFO_W0_AMSDU FIELD32(0x00000800) 178#define RXD_W0_AMSDU FIELD32(0x00000800)
129#define RXINFO_W0_HTC FIELD32(0x00001000) 179#define RXD_W0_HTC FIELD32(0x00001000)
130#define RXINFO_W0_RSSI FIELD32(0x00002000) 180#define RXD_W0_RSSI FIELD32(0x00002000)
131#define RXINFO_W0_L2PAD FIELD32(0x00004000) 181#define RXD_W0_L2PAD FIELD32(0x00004000)
132#define RXINFO_W0_AMPDU FIELD32(0x00008000) 182#define RXD_W0_AMPDU FIELD32(0x00008000)
133#define RXINFO_W0_DECRYPTED FIELD32(0x00010000) 183#define RXD_W0_DECRYPTED FIELD32(0x00010000)
134#define RXINFO_W0_PLCP_RSSI FIELD32(0x00020000) 184#define RXD_W0_PLCP_RSSI FIELD32(0x00020000)
135#define RXINFO_W0_CIPHER_ALG FIELD32(0x00040000) 185#define RXD_W0_CIPHER_ALG FIELD32(0x00040000)
136#define RXINFO_W0_LAST_AMSDU FIELD32(0x00080000) 186#define RXD_W0_LAST_AMSDU FIELD32(0x00080000)
137#define RXINFO_W0_PLCP_SIGNAL FIELD32(0xfff00000) 187#define RXD_W0_PLCP_SIGNAL FIELD32(0xfff00000)
138 188
139#endif /* RT2800USB_H */ 189#endif /* RT2800USB_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index dcfc8c25d1a7..d9daa9c406fa 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -104,6 +104,12 @@
104#define GET_DURATION_RES(__size, __rate)(((__size) * 8 * 10) % (__rate)) 104#define GET_DURATION_RES(__size, __rate)(((__size) * 8 * 10) % (__rate))
105 105
106/* 106/*
107 * Determine the number of L2 padding bytes required between the header and
108 * the payload.
109 */
110#define L2PAD_SIZE(__hdrlen) (-(__hdrlen) & 3)
111
112/*
107 * Determine the alignment requirement, 113 * Determine the alignment requirement,
108 * to make sure the 802.11 payload is padded to a 4-byte boundrary 114 * to make sure the 802.11 payload is padded to a 4-byte boundrary
109 * we must determine the address of the payload and calculate the 115 * we must determine the address of the payload and calculate the
@@ -154,6 +160,7 @@ struct avg_val {
154enum rt2x00_chip_intf { 160enum rt2x00_chip_intf {
155 RT2X00_CHIP_INTF_PCI, 161 RT2X00_CHIP_INTF_PCI,
156 RT2X00_CHIP_INTF_USB, 162 RT2X00_CHIP_INTF_USB,
163 RT2X00_CHIP_INTF_SOC,
157}; 164};
158 165
159/* 166/*
@@ -163,25 +170,26 @@ enum rt2x00_chip_intf {
163 */ 170 */
164struct rt2x00_chip { 171struct rt2x00_chip {
165 u16 rt; 172 u16 rt;
166#define RT2460 0x0101 173#define RT2460 0x2460
167#define RT2560 0x0201 174#define RT2560 0x2560
168#define RT2570 0x1201 175#define RT2570 0x2570
169#define RT2561s 0x0301 /* Turbo */ 176#define RT2661 0x2661
170#define RT2561 0x0302 177#define RT2573 0x2573
171#define RT2661 0x0401 178#define RT2860 0x2860 /* 2.4GHz PCI/CB */
172#define RT2571 0x1300 179#define RT2870 0x2870
173#define RT2860 0x0601 /* 2.4GHz PCI/CB */ 180#define RT2872 0x2872
174#define RT2860D 0x0681 /* 2.4GHz, 5GHz PCI/CB */
175#define RT2890 0x0701 /* 2.4GHz PCIe */
176#define RT2890D 0x0781 /* 2.4GHz, 5GHz PCIe */
177#define RT2880 0x2880 /* WSOC */ 181#define RT2880 0x2880 /* WSOC */
182#define RT2883 0x2883 /* WSOC */
183#define RT2890 0x2890 /* 2.4GHz PCIe */
178#define RT3052 0x3052 /* WSOC */ 184#define RT3052 0x3052 /* WSOC */
185#define RT3070 0x3070
186#define RT3071 0x3071
179#define RT3090 0x3090 /* 2.4GHz PCIe */ 187#define RT3090 0x3090 /* 2.4GHz PCIe */
180#define RT2870 0x1600 188#define RT3390 0x3390
181#define RT3070 0x1800 189#define RT3572 0x3572
182 190
183 u16 rf; 191 u16 rf;
184 u32 rev; 192 u16 rev;
185 193
186 enum rt2x00_chip_intf intf; 194 enum rt2x00_chip_intf intf;
187}; 195};
@@ -911,51 +919,30 @@ static inline void rt2x00_eeprom_write(struct rt2x00_dev *rt2x00dev,
911 * Chipset handlers 919 * Chipset handlers
912 */ 920 */
913static inline void rt2x00_set_chip(struct rt2x00_dev *rt2x00dev, 921static inline void rt2x00_set_chip(struct rt2x00_dev *rt2x00dev,
914 const u16 rt, const u16 rf, const u32 rev) 922 const u16 rt, const u16 rf, const u16 rev)
915{ 923{
916 rt2x00dev->chip.rt = rt; 924 rt2x00dev->chip.rt = rt;
917 rt2x00dev->chip.rf = rf; 925 rt2x00dev->chip.rf = rf;
918 rt2x00dev->chip.rev = rev; 926 rt2x00dev->chip.rev = rev;
919}
920
921static inline void rt2x00_set_chip_rt(struct rt2x00_dev *rt2x00dev,
922 const u16 rt)
923{
924 rt2x00dev->chip.rt = rt;
925}
926
927static inline void rt2x00_set_chip_rf(struct rt2x00_dev *rt2x00dev,
928 const u16 rf, const u32 rev)
929{
930 rt2x00_set_chip(rt2x00dev, rt2x00dev->chip.rt, rf, rev);
931}
932 927
933static inline void rt2x00_print_chip(struct rt2x00_dev *rt2x00dev)
934{
935 INFO(rt2x00dev, 928 INFO(rt2x00dev,
936 "Chipset detected - rt: %04x, rf: %04x, rev: %08x.\n", 929 "Chipset detected - rt: %04x, rf: %04x, rev: %04x.\n",
937 rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00dev->chip.rev); 930 rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00dev->chip.rev);
938} 931}
939 932
940static inline char rt2x00_rt(const struct rt2x00_chip *chipset, const u16 chip) 933static inline char rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt)
941{ 934{
942 return (chipset->rt == chip); 935 return (rt2x00dev->chip.rt == rt);
943} 936}
944 937
945static inline char rt2x00_rf(const struct rt2x00_chip *chipset, const u16 chip) 938static inline char rt2x00_rf(struct rt2x00_dev *rt2x00dev, const u16 rf)
946{ 939{
947 return (chipset->rf == chip); 940 return (rt2x00dev->chip.rf == rf);
948} 941}
949 942
950static inline u32 rt2x00_rev(const struct rt2x00_chip *chipset) 943static inline u16 rt2x00_rev(struct rt2x00_dev *rt2x00dev)
951{ 944{
952 return chipset->rev; 945 return rt2x00dev->chip.rev;
953}
954
955static inline bool rt2x00_check_rev(const struct rt2x00_chip *chipset,
956 const u32 mask, const u32 rev)
957{
958 return ((chipset->rev & mask) == rev);
959} 946}
960 947
961static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev, 948static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
@@ -964,20 +951,25 @@ static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
964 rt2x00dev->chip.intf = intf; 951 rt2x00dev->chip.intf = intf;
965} 952}
966 953
967static inline bool rt2x00_intf(const struct rt2x00_chip *chipset, 954static inline bool rt2x00_intf(struct rt2x00_dev *rt2x00dev,
968 enum rt2x00_chip_intf intf) 955 enum rt2x00_chip_intf intf)
969{ 956{
970 return (chipset->intf == intf); 957 return (rt2x00dev->chip.intf == intf);
958}
959
960static inline bool rt2x00_is_pci(struct rt2x00_dev *rt2x00dev)
961{
962 return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
971} 963}
972 964
973static inline bool rt2x00_intf_is_pci(struct rt2x00_dev *rt2x00dev) 965static inline bool rt2x00_is_usb(struct rt2x00_dev *rt2x00dev)
974{ 966{
975 return rt2x00_intf(&rt2x00dev->chip, RT2X00_CHIP_INTF_PCI); 967 return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_USB);
976} 968}
977 969
978static inline bool rt2x00_intf_is_usb(struct rt2x00_dev *rt2x00dev) 970static inline bool rt2x00_is_soc(struct rt2x00_dev *rt2x00dev)
979{ 971{
980 return rt2x00_intf(&rt2x00dev->chip, RT2X00_CHIP_INTF_USB); 972 return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC);
981} 973}
982 974
983/** 975/**
@@ -1019,9 +1011,9 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
1019int rt2x00mac_start(struct ieee80211_hw *hw); 1011int rt2x00mac_start(struct ieee80211_hw *hw);
1020void rt2x00mac_stop(struct ieee80211_hw *hw); 1012void rt2x00mac_stop(struct ieee80211_hw *hw);
1021int rt2x00mac_add_interface(struct ieee80211_hw *hw, 1013int rt2x00mac_add_interface(struct ieee80211_hw *hw,
1022 struct ieee80211_if_init_conf *conf); 1014 struct ieee80211_vif *vif);
1023void rt2x00mac_remove_interface(struct ieee80211_hw *hw, 1015void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
1024 struct ieee80211_if_init_conf *conf); 1016 struct ieee80211_vif *vif);
1025int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed); 1017int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed);
1026void rt2x00mac_configure_filter(struct ieee80211_hw *hw, 1018void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
1027 unsigned int changed_flags, 1019 unsigned int changed_flags,
@@ -1038,8 +1030,6 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1038#endif /* CONFIG_RT2X00_LIB_CRYPTO */ 1030#endif /* CONFIG_RT2X00_LIB_CRYPTO */
1039int rt2x00mac_get_stats(struct ieee80211_hw *hw, 1031int rt2x00mac_get_stats(struct ieee80211_hw *hw,
1040 struct ieee80211_low_level_stats *stats); 1032 struct ieee80211_low_level_stats *stats);
1041int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw,
1042 struct ieee80211_tx_queue_stats *stats);
1043void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, 1033void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
1044 struct ieee80211_vif *vif, 1034 struct ieee80211_vif *vif,
1045 struct ieee80211_bss_conf *bss_conf, 1035 struct ieee80211_bss_conf *bss_conf,
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 7d323a763b54..70c04c282efc 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -184,7 +184,7 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
184 dump_hdr->data_length = cpu_to_le32(skb->len); 184 dump_hdr->data_length = cpu_to_le32(skb->len);
185 dump_hdr->chip_rt = cpu_to_le16(rt2x00dev->chip.rt); 185 dump_hdr->chip_rt = cpu_to_le16(rt2x00dev->chip.rt);
186 dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf); 186 dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf);
187 dump_hdr->chip_rev = cpu_to_le32(rt2x00dev->chip.rev); 187 dump_hdr->chip_rev = cpu_to_le16(rt2x00dev->chip.rev);
188 dump_hdr->type = cpu_to_le16(type); 188 dump_hdr->type = cpu_to_le16(type);
189 dump_hdr->queue_index = desc->entry->queue->qid; 189 dump_hdr->queue_index = desc->entry->queue->qid;
190 dump_hdr->entry_index = desc->entry->entry_idx; 190 dump_hdr->entry_index = desc->entry->entry_idx;
@@ -573,7 +573,7 @@ static struct dentry *rt2x00debug_create_file_chipset(const char *name,
573 blob->data = data; 573 blob->data = data;
574 data += sprintf(data, "rt chip:\t%04x\n", intf->rt2x00dev->chip.rt); 574 data += sprintf(data, "rt chip:\t%04x\n", intf->rt2x00dev->chip.rt);
575 data += sprintf(data, "rf chip:\t%04x\n", intf->rt2x00dev->chip.rf); 575 data += sprintf(data, "rf chip:\t%04x\n", intf->rt2x00dev->chip.rf);
576 data += sprintf(data, "revision:\t%08x\n", intf->rt2x00dev->chip.rev); 576 data += sprintf(data, "revision:\t%04x\n", intf->rt2x00dev->chip.rev);
577 data += sprintf(data, "\n"); 577 data += sprintf(data, "\n");
578 data += sprintf(data, "register\tbase\twords\twordsize\n"); 578 data += sprintf(data, "register\tbase\twords\twordsize\n");
579 data += sprintf(data, "csr\t%d\t%d\t%d\n", 579 data += sprintf(data, "csr\t%d\t%d\t%d\n",
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 265e66dba552..b93731b79903 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -385,9 +385,6 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
385 memset(&rxdesc, 0, sizeof(rxdesc)); 385 memset(&rxdesc, 0, sizeof(rxdesc));
386 rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc); 386 rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
387 387
388 /* Trim buffer to correct size */
389 skb_trim(entry->skb, rxdesc.size);
390
391 /* 388 /*
392 * The data behind the ieee80211 header must be 389 * The data behind the ieee80211 header must be
393 * aligned on a 4 byte boundary. 390 * aligned on a 4 byte boundary.
@@ -404,11 +401,16 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
404 (rxdesc.flags & RX_FLAG_IV_STRIPPED)) 401 (rxdesc.flags & RX_FLAG_IV_STRIPPED))
405 rt2x00crypto_rx_insert_iv(entry->skb, header_length, 402 rt2x00crypto_rx_insert_iv(entry->skb, header_length,
406 &rxdesc); 403 &rxdesc);
407 else if (rxdesc.dev_flags & RXDONE_L2PAD) 404 else if (header_length &&
405 (rxdesc.size > header_length) &&
406 (rxdesc.dev_flags & RXDONE_L2PAD))
408 rt2x00queue_remove_l2pad(entry->skb, header_length); 407 rt2x00queue_remove_l2pad(entry->skb, header_length);
409 else 408 else
410 rt2x00queue_align_payload(entry->skb, header_length); 409 rt2x00queue_align_payload(entry->skb, header_length);
411 410
411 /* Trim buffer to correct size */
412 skb_trim(entry->skb, rxdesc.size);
413
412 /* 414 /*
413 * Check if the frame was received using HT. In that case, 415 * Check if the frame was received using HT. In that case,
414 * the rate is the MCS index and should be passed to mac80211 416 * the rate is the MCS index and should be passed to mac80211
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index de549c244ed8..abbd857ec759 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -187,10 +187,10 @@ void rt2x00mac_stop(struct ieee80211_hw *hw)
187EXPORT_SYMBOL_GPL(rt2x00mac_stop); 187EXPORT_SYMBOL_GPL(rt2x00mac_stop);
188 188
189int rt2x00mac_add_interface(struct ieee80211_hw *hw, 189int rt2x00mac_add_interface(struct ieee80211_hw *hw,
190 struct ieee80211_if_init_conf *conf) 190 struct ieee80211_vif *vif)
191{ 191{
192 struct rt2x00_dev *rt2x00dev = hw->priv; 192 struct rt2x00_dev *rt2x00dev = hw->priv;
193 struct rt2x00_intf *intf = vif_to_intf(conf->vif); 193 struct rt2x00_intf *intf = vif_to_intf(vif);
194 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON); 194 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON);
195 struct queue_entry *entry = NULL; 195 struct queue_entry *entry = NULL;
196 unsigned int i; 196 unsigned int i;
@@ -203,7 +203,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
203 !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) 203 !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
204 return -ENODEV; 204 return -ENODEV;
205 205
206 switch (conf->type) { 206 switch (vif->type) {
207 case NL80211_IFTYPE_AP: 207 case NL80211_IFTYPE_AP:
208 /* 208 /*
209 * We don't support mixed combinations of 209 * We don't support mixed combinations of
@@ -263,7 +263,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
263 * increase interface count and start initialization. 263 * increase interface count and start initialization.
264 */ 264 */
265 265
266 if (conf->type == NL80211_IFTYPE_AP) 266 if (vif->type == NL80211_IFTYPE_AP)
267 rt2x00dev->intf_ap_count++; 267 rt2x00dev->intf_ap_count++;
268 else 268 else
269 rt2x00dev->intf_sta_count++; 269 rt2x00dev->intf_sta_count++;
@@ -273,16 +273,16 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
273 mutex_init(&intf->beacon_skb_mutex); 273 mutex_init(&intf->beacon_skb_mutex);
274 intf->beacon = entry; 274 intf->beacon = entry;
275 275
276 if (conf->type == NL80211_IFTYPE_AP) 276 if (vif->type == NL80211_IFTYPE_AP)
277 memcpy(&intf->bssid, conf->mac_addr, ETH_ALEN); 277 memcpy(&intf->bssid, vif->addr, ETH_ALEN);
278 memcpy(&intf->mac, conf->mac_addr, ETH_ALEN); 278 memcpy(&intf->mac, vif->addr, ETH_ALEN);
279 279
280 /* 280 /*
281 * The MAC adddress must be configured after the device 281 * The MAC adddress must be configured after the device
282 * has been initialized. Otherwise the device can reset 282 * has been initialized. Otherwise the device can reset
283 * the MAC registers. 283 * the MAC registers.
284 */ 284 */
285 rt2x00lib_config_intf(rt2x00dev, intf, conf->type, intf->mac, NULL); 285 rt2x00lib_config_intf(rt2x00dev, intf, vif->type, intf->mac, NULL);
286 286
287 /* 287 /*
288 * Some filters depend on the current working mode. We can force 288 * Some filters depend on the current working mode. We can force
@@ -296,10 +296,10 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
296EXPORT_SYMBOL_GPL(rt2x00mac_add_interface); 296EXPORT_SYMBOL_GPL(rt2x00mac_add_interface);
297 297
298void rt2x00mac_remove_interface(struct ieee80211_hw *hw, 298void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
299 struct ieee80211_if_init_conf *conf) 299 struct ieee80211_vif *vif)
300{ 300{
301 struct rt2x00_dev *rt2x00dev = hw->priv; 301 struct rt2x00_dev *rt2x00dev = hw->priv;
302 struct rt2x00_intf *intf = vif_to_intf(conf->vif); 302 struct rt2x00_intf *intf = vif_to_intf(vif);
303 303
304 /* 304 /*
305 * Don't allow interfaces to be remove while 305 * Don't allow interfaces to be remove while
@@ -307,11 +307,11 @@ void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
307 * no interface is present. 307 * no interface is present.
308 */ 308 */
309 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) || 309 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
310 (conf->type == NL80211_IFTYPE_AP && !rt2x00dev->intf_ap_count) || 310 (vif->type == NL80211_IFTYPE_AP && !rt2x00dev->intf_ap_count) ||
311 (conf->type != NL80211_IFTYPE_AP && !rt2x00dev->intf_sta_count)) 311 (vif->type != NL80211_IFTYPE_AP && !rt2x00dev->intf_sta_count))
312 return; 312 return;
313 313
314 if (conf->type == NL80211_IFTYPE_AP) 314 if (vif->type == NL80211_IFTYPE_AP)
315 rt2x00dev->intf_ap_count--; 315 rt2x00dev->intf_ap_count--;
316 else 316 else
317 rt2x00dev->intf_sta_count--; 317 rt2x00dev->intf_sta_count--;
@@ -555,22 +555,6 @@ int rt2x00mac_get_stats(struct ieee80211_hw *hw,
555} 555}
556EXPORT_SYMBOL_GPL(rt2x00mac_get_stats); 556EXPORT_SYMBOL_GPL(rt2x00mac_get_stats);
557 557
558int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw,
559 struct ieee80211_tx_queue_stats *stats)
560{
561 struct rt2x00_dev *rt2x00dev = hw->priv;
562 unsigned int i;
563
564 for (i = 0; i < rt2x00dev->ops->tx_queues; i++) {
565 stats[i].len = rt2x00dev->tx[i].length;
566 stats[i].limit = rt2x00dev->tx[i].limit;
567 stats[i].count = rt2x00dev->tx[i].count;
568 }
569
570 return 0;
571}
572EXPORT_SYMBOL_GPL(rt2x00mac_get_tx_stats);
573
574void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, 558void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
575 struct ieee80211_vif *vif, 559 struct ieee80211_vif *vif,
576 struct ieee80211_bss_conf *bss_conf, 560 struct ieee80211_bss_conf *bss_conf,
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 0feb4d0e4668..047123b766fc 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -41,6 +41,9 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
41{ 41{
42 unsigned int i; 42 unsigned int i;
43 43
44 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
45 return 0;
46
44 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 47 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
45 rt2x00pci_register_read(rt2x00dev, offset, reg); 48 rt2x00pci_register_read(rt2x00dev, offset, reg);
46 if (!rt2x00_get_field32(*reg, field)) 49 if (!rt2x00_get_field32(*reg, field))
@@ -269,7 +272,6 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
269 struct ieee80211_hw *hw; 272 struct ieee80211_hw *hw;
270 struct rt2x00_dev *rt2x00dev; 273 struct rt2x00_dev *rt2x00dev;
271 int retval; 274 int retval;
272 u16 chip;
273 275
274 retval = pci_request_regions(pci_dev, pci_name(pci_dev)); 276 retval = pci_request_regions(pci_dev, pci_name(pci_dev));
275 if (retval) { 277 if (retval) {
@@ -312,12 +314,6 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
312 314
313 rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI); 315 rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
314 316
315 /*
316 * Determine RT chipset by reading PCI header.
317 */
318 pci_read_config_word(pci_dev, PCI_DEVICE_ID, &chip);
319 rt2x00_set_chip_rt(rt2x00dev, chip);
320
321 retval = rt2x00pci_alloc_reg(rt2x00dev); 317 retval = rt2x00pci_alloc_reg(rt2x00dev);
322 if (retval) 318 if (retval)
323 goto exit_free_device; 319 goto exit_free_device;
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index d4f9449ab0a4..8149ff68410a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -27,6 +27,7 @@
27#define RT2X00PCI_H 27#define RT2X00PCI_H
28 28
29#include <linux/io.h> 29#include <linux/io.h>
30#include <linux/pci.h>
30 31
31/* 32/*
32 * This variable should be used with the 33 * This variable should be used with the
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 9915a09141ef..0b4801a14601 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -177,55 +177,45 @@ void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
177 177
178void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length) 178void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
179{ 179{
180 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 180 unsigned int payload_length = skb->len - header_length;
181 unsigned int frame_length = skb->len;
182 unsigned int header_align = ALIGN_SIZE(skb, 0); 181 unsigned int header_align = ALIGN_SIZE(skb, 0);
183 unsigned int payload_align = ALIGN_SIZE(skb, header_length); 182 unsigned int payload_align = ALIGN_SIZE(skb, header_length);
184 unsigned int l2pad = 4 - (payload_align - header_align); 183 unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
185 184
186 if (header_align == payload_align) { 185 /*
187 /* 186 * Adjust the header alignment if the payload needs to be moved more
188 * Both header and payload must be moved the same 187 * than the header.
189 * amount of bytes to align them properly. This means 188 */
190 * we don't use the L2 padding but just move the entire 189 if (payload_align > header_align)
191 * frame. 190 header_align += 4;
192 */ 191
193 rt2x00queue_align_frame(skb); 192 /* There is nothing to do if no alignment is needed */
194 } else if (!payload_align) { 193 if (!header_align)
195 /* 194 return;
196 * Simple L2 padding, only the header needs to be moved, 195
197 * the payload is already properly aligned. 196 /* Reserve the amount of space needed in front of the frame */
198 */ 197 skb_push(skb, header_align);
199 skb_push(skb, header_align); 198
200 memmove(skb->data, skb->data + header_align, frame_length); 199 /*
201 skbdesc->flags |= SKBDESC_L2_PADDED; 200 * Move the header.
202 } else { 201 */
203 /* 202 memmove(skb->data, skb->data + header_align, header_length);
204 *
205 * Complicated L2 padding, both header and payload need
206 * to be moved. By default we only move to the start
207 * of the buffer, so our header alignment needs to be
208 * increased if there is not enough room for the header
209 * to be moved.
210 */
211 if (payload_align > header_align)
212 header_align += 4;
213 203
214 skb_push(skb, header_align); 204 /* Move the payload, if present and if required */
215 memmove(skb->data, skb->data + header_align, header_length); 205 if (payload_length && payload_align)
216 memmove(skb->data + header_length + l2pad, 206 memmove(skb->data + header_length + l2pad,
217 skb->data + header_length + l2pad + payload_align, 207 skb->data + header_length + l2pad + payload_align,
218 frame_length - header_length); 208 payload_length);
219 skbdesc->flags |= SKBDESC_L2_PADDED; 209
220 } 210 /* Trim the skb to the correct size */
211 skb_trim(skb, header_length + l2pad + payload_length);
221} 212}
222 213
223void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length) 214void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
224{ 215{
225 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 216 unsigned int l2pad = L2PAD_SIZE(header_length);
226 unsigned int l2pad = 4 - (header_length & 3);
227 217
228 if (!l2pad || (skbdesc->flags & SKBDESC_L2_PADDED)) 218 if (!l2pad)
229 return; 219 return;
230 220
231 memmove(skb->data + l2pad, skb->data, header_length); 221 memmove(skb->data + l2pad, skb->data, header_length);
@@ -346,7 +336,9 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
346 * Header and alignment information. 336 * Header and alignment information.
347 */ 337 */
348 txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb); 338 txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
349 txdesc->l2pad = ALIGN_SIZE(entry->skb, txdesc->header_length); 339 if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags) &&
340 (entry->skb->len > txdesc->header_length))
341 txdesc->l2pad = L2PAD_SIZE(txdesc->header_length);
350 342
351 /* 343 /*
352 * Check whether this frame is to be acked. 344 * Check whether this frame is to be acked.
@@ -387,10 +379,13 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
387 379
388 /* 380 /*
389 * Beacons and probe responses require the tsf timestamp 381 * Beacons and probe responses require the tsf timestamp
390 * to be inserted into the frame. 382 * to be inserted into the frame, except for a frame that has been injected
383 * through a monitor interface. This latter is needed for testing a
384 * monitor interface.
391 */ 385 */
392 if (ieee80211_is_beacon(hdr->frame_control) || 386 if ((ieee80211_is_beacon(hdr->frame_control) ||
393 ieee80211_is_probe_resp(hdr->frame_control)) 387 ieee80211_is_probe_resp(hdr->frame_control)) &&
388 (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
394 __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags); 389 __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
395 390
396 /* 391 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 70775e5ba1ac..c1e482bb37b3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -92,8 +92,6 @@ enum data_queue_qid {
92 * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX 92 * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX
93 * @SKBDESC_IV_STRIPPED: Frame contained a IV/EIV provided by 93 * @SKBDESC_IV_STRIPPED: Frame contained a IV/EIV provided by
94 * mac80211 but was stripped for processing by the driver. 94 * mac80211 but was stripped for processing by the driver.
95 * @SKBDESC_L2_PADDED: Payload has been padded for 4-byte alignment,
96 * the padded bytes are located between header and payload.
97 * @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211, 95 * @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211,
98 * don't try to pass it back. 96 * don't try to pass it back.
99 */ 97 */
@@ -101,8 +99,7 @@ enum skb_frame_desc_flags {
101 SKBDESC_DMA_MAPPED_RX = 1 << 0, 99 SKBDESC_DMA_MAPPED_RX = 1 << 0,
102 SKBDESC_DMA_MAPPED_TX = 1 << 1, 100 SKBDESC_DMA_MAPPED_TX = 1 << 1,
103 SKBDESC_IV_STRIPPED = 1 << 2, 101 SKBDESC_IV_STRIPPED = 1 << 2,
104 SKBDESC_L2_PADDED = 1 << 3, 102 SKBDESC_NOT_MAC80211 = 1 << 3,
105 SKBDESC_NOT_MAC80211 = 1 << 4,
106}; 103};
107 104
108/** 105/**
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.c b/drivers/net/wireless/rt2x00/rt2x00soc.c
index 19e684f8ffa1..4efdc96010f6 100644
--- a/drivers/net/wireless/rt2x00/rt2x00soc.c
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.c
@@ -71,9 +71,7 @@ exit:
71 return -ENOMEM; 71 return -ENOMEM;
72} 72}
73 73
74int rt2x00soc_probe(struct platform_device *pdev, 74int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops)
75 const unsigned short chipset,
76 const struct rt2x00_ops *ops)
77{ 75{
78 struct ieee80211_hw *hw; 76 struct ieee80211_hw *hw;
79 struct rt2x00_dev *rt2x00dev; 77 struct rt2x00_dev *rt2x00dev;
@@ -94,12 +92,7 @@ int rt2x00soc_probe(struct platform_device *pdev,
94 rt2x00dev->irq = platform_get_irq(pdev, 0); 92 rt2x00dev->irq = platform_get_irq(pdev, 0);
95 rt2x00dev->name = pdev->dev.driver->name; 93 rt2x00dev->name = pdev->dev.driver->name;
96 94
97 /* 95 rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC);
98 * SoC devices mimic PCI behavior.
99 */
100 rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
101
102 rt2x00_set_chip_rt(rt2x00dev, chipset);
103 96
104 retval = rt2x00soc_alloc_reg(rt2x00dev); 97 retval = rt2x00soc_alloc_reg(rt2x00dev);
105 if (retval) 98 if (retval)
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.h b/drivers/net/wireless/rt2x00/rt2x00soc.h
index 8a3416624af5..4739edfe2f00 100644
--- a/drivers/net/wireless/rt2x00/rt2x00soc.h
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.h
@@ -28,18 +28,10 @@
28 28
29#define KSEG1ADDR(__ptr) __ptr 29#define KSEG1ADDR(__ptr) __ptr
30 30
31#define __rt2x00soc_probe(__chipset, __ops) \
32static int __rt2x00soc_probe(struct platform_device *pdev) \
33{ \
34 return rt2x00soc_probe(pdev, (__chipset), (__ops)); \
35}
36
37/* 31/*
38 * SoC driver handlers. 32 * SoC driver handlers.
39 */ 33 */
40int rt2x00soc_probe(struct platform_device *pdev, 34int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops);
41 const unsigned short chipset,
42 const struct rt2x00_ops *ops);
43int rt2x00soc_remove(struct platform_device *pdev); 35int rt2x00soc_remove(struct platform_device *pdev);
44#ifdef CONFIG_PM 36#ifdef CONFIG_PM
45int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state); 37int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state);
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 0ca589306d71..e2da928dd9f0 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -637,8 +637,7 @@ static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
637 rt61pci_bbp_read(rt2x00dev, 4, &r4); 637 rt61pci_bbp_read(rt2x00dev, 4, &r4);
638 rt61pci_bbp_read(rt2x00dev, 77, &r77); 638 rt61pci_bbp_read(rt2x00dev, 77, &r77);
639 639
640 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 640 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF5325));
641 rt2x00_rf(&rt2x00dev->chip, RF5325));
642 641
643 /* 642 /*
644 * Configure the RX antenna. 643 * Configure the RX antenna.
@@ -684,8 +683,7 @@ static void rt61pci_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
684 rt61pci_bbp_read(rt2x00dev, 4, &r4); 683 rt61pci_bbp_read(rt2x00dev, 4, &r4);
685 rt61pci_bbp_read(rt2x00dev, 77, &r77); 684 rt61pci_bbp_read(rt2x00dev, 77, &r77);
686 685
687 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 686 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF2529));
688 rt2x00_rf(&rt2x00dev->chip, RF2529));
689 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 687 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
690 !test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags)); 688 !test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags));
691 689
@@ -833,12 +831,11 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
833 831
834 rt2x00pci_register_write(rt2x00dev, PHY_CSR0, reg); 832 rt2x00pci_register_write(rt2x00dev, PHY_CSR0, reg);
835 833
836 if (rt2x00_rf(&rt2x00dev->chip, RF5225) || 834 if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325))
837 rt2x00_rf(&rt2x00dev->chip, RF5325))
838 rt61pci_config_antenna_5x(rt2x00dev, ant); 835 rt61pci_config_antenna_5x(rt2x00dev, ant);
839 else if (rt2x00_rf(&rt2x00dev->chip, RF2527)) 836 else if (rt2x00_rf(rt2x00dev, RF2527))
840 rt61pci_config_antenna_2x(rt2x00dev, ant); 837 rt61pci_config_antenna_2x(rt2x00dev, ant);
841 else if (rt2x00_rf(&rt2x00dev->chip, RF2529)) { 838 else if (rt2x00_rf(rt2x00dev, RF2529)) {
842 if (test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) 839 if (test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags))
843 rt61pci_config_antenna_2x(rt2x00dev, ant); 840 rt61pci_config_antenna_2x(rt2x00dev, ant);
844 else 841 else
@@ -879,8 +876,7 @@ static void rt61pci_config_channel(struct rt2x00_dev *rt2x00dev,
879 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); 876 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
880 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); 877 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
881 878
882 smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) || 879 smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527));
883 rt2x00_rf(&rt2x00dev->chip, RF2527));
884 880
885 rt61pci_bbp_read(rt2x00dev, 3, &r3); 881 rt61pci_bbp_read(rt2x00dev, 3, &r3);
886 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart); 882 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
@@ -1135,16 +1131,18 @@ dynamic_cca_tune:
1135 */ 1131 */
1136static char *rt61pci_get_firmware_name(struct rt2x00_dev *rt2x00dev) 1132static char *rt61pci_get_firmware_name(struct rt2x00_dev *rt2x00dev)
1137{ 1133{
1134 u16 chip;
1138 char *fw_name; 1135 char *fw_name;
1139 1136
1140 switch (rt2x00dev->chip.rt) { 1137 pci_read_config_word(to_pci_dev(rt2x00dev->dev), PCI_DEVICE_ID, &chip);
1141 case RT2561: 1138 switch (chip) {
1139 case RT2561_PCI_ID:
1142 fw_name = FIRMWARE_RT2561; 1140 fw_name = FIRMWARE_RT2561;
1143 break; 1141 break;
1144 case RT2561s: 1142 case RT2561s_PCI_ID:
1145 fw_name = FIRMWARE_RT2561s; 1143 fw_name = FIRMWARE_RT2561s;
1146 break; 1144 break;
1147 case RT2661: 1145 case RT2661_PCI_ID:
1148 fw_name = FIRMWARE_RT2661; 1146 fw_name = FIRMWARE_RT2661;
1149 break; 1147 break;
1150 default: 1148 default:
@@ -2299,13 +2297,13 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2299 */ 2297 */
2300 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); 2298 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
2301 rt2x00pci_register_read(rt2x00dev, MAC_CSR0, &reg); 2299 rt2x00pci_register_read(rt2x00dev, MAC_CSR0, &reg);
2302 rt2x00_set_chip_rf(rt2x00dev, value, reg); 2300 rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
2303 rt2x00_print_chip(rt2x00dev); 2301 value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
2304 2302
2305 if (!rt2x00_rf(&rt2x00dev->chip, RF5225) && 2303 if (!rt2x00_rf(rt2x00dev, RF5225) &&
2306 !rt2x00_rf(&rt2x00dev->chip, RF5325) && 2304 !rt2x00_rf(rt2x00dev, RF5325) &&
2307 !rt2x00_rf(&rt2x00dev->chip, RF2527) && 2305 !rt2x00_rf(rt2x00dev, RF2527) &&
2308 !rt2x00_rf(&rt2x00dev->chip, RF2529)) { 2306 !rt2x00_rf(rt2x00dev, RF2529)) {
2309 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 2307 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
2310 return -ENODEV; 2308 return -ENODEV;
2311 } 2309 }
@@ -2360,7 +2358,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2360 * the antenna settings should be gathered from the NIC 2358 * the antenna settings should be gathered from the NIC
2361 * eeprom word. 2359 * eeprom word.
2362 */ 2360 */
2363 if (rt2x00_rf(&rt2x00dev->chip, RF2529) && 2361 if (rt2x00_rf(rt2x00dev, RF2529) &&
2364 !test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) { 2362 !test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) {
2365 rt2x00dev->default_ant.rx = 2363 rt2x00dev->default_ant.rx =
2366 ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED); 2364 ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED);
@@ -2571,8 +2569,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2571 spec->channels = rf_vals_seq; 2569 spec->channels = rf_vals_seq;
2572 } 2570 }
2573 2571
2574 if (rt2x00_rf(&rt2x00dev->chip, RF5225) || 2572 if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325)) {
2575 rt2x00_rf(&rt2x00dev->chip, RF5325)) {
2576 spec->supported_bands |= SUPPORT_BAND_5GHZ; 2573 spec->supported_bands |= SUPPORT_BAND_5GHZ;
2577 spec->num_channels = ARRAY_SIZE(rf_vals_seq); 2574 spec->num_channels = ARRAY_SIZE(rf_vals_seq);
2578 } 2575 }
@@ -2735,7 +2732,6 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
2735 .get_stats = rt2x00mac_get_stats, 2732 .get_stats = rt2x00mac_get_stats,
2736 .bss_info_changed = rt2x00mac_bss_info_changed, 2733 .bss_info_changed = rt2x00mac_bss_info_changed,
2737 .conf_tx = rt61pci_conf_tx, 2734 .conf_tx = rt61pci_conf_tx,
2738 .get_tx_stats = rt2x00mac_get_tx_stats,
2739 .get_tsf = rt61pci_get_tsf, 2735 .get_tsf = rt61pci_get_tsf,
2740 .rfkill_poll = rt2x00mac_rfkill_poll, 2736 .rfkill_poll = rt2x00mac_rfkill_poll,
2741}; 2737};
@@ -2812,7 +2808,7 @@ static const struct rt2x00_ops rt61pci_ops = {
2812/* 2808/*
2813 * RT61pci module information. 2809 * RT61pci module information.
2814 */ 2810 */
2815static struct pci_device_id rt61pci_device_table[] = { 2811static DEFINE_PCI_DEVICE_TABLE(rt61pci_device_table) = {
2816 /* RT2561s */ 2812 /* RT2561s */
2817 { PCI_DEVICE(0x1814, 0x0301), PCI_DEVICE_DATA(&rt61pci_ops) }, 2813 { PCI_DEVICE(0x1814, 0x0301), PCI_DEVICE_DATA(&rt61pci_ops) },
2818 /* RT2561 v2 */ 2814 /* RT2561 v2 */
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index 8f13810622bd..df80f1af22a4 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -28,6 +28,13 @@
28#define RT61PCI_H 28#define RT61PCI_H
29 29
30/* 30/*
31 * RT chip PCI IDs.
32 */
33#define RT2561s_PCI_ID 0x0301
34#define RT2561_PCI_ID 0x0302
35#define RT2661_PCI_ID 0x0401
36
37/*
31 * RF chip defines. 38 * RF chip defines.
32 */ 39 */
33#define RF5225 0x0001 40#define RF5225 0x0001
@@ -225,6 +232,8 @@ struct hw_pairwise_ta_entry {
225 * MAC_CSR0: ASIC revision number. 232 * MAC_CSR0: ASIC revision number.
226 */ 233 */
227#define MAC_CSR0 0x3000 234#define MAC_CSR0 0x3000
235#define MAC_CSR0_REVISION FIELD32(0x0000000f)
236#define MAC_CSR0_CHIPSET FIELD32(0x000ffff0)
228 237
229/* 238/*
230 * MAC_CSR1: System control register. 239 * MAC_CSR1: System control register.
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index ced3b6ab5e16..f39a8ed17841 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -136,8 +136,8 @@ static void rt73usb_rf_write(struct rt2x00_dev *rt2x00dev,
136 * all others contain 20 bits. 136 * all others contain 20 bits.
137 */ 137 */
138 rt2x00_set_field32(&reg, PHY_CSR4_NUMBER_OF_BITS, 138 rt2x00_set_field32(&reg, PHY_CSR4_NUMBER_OF_BITS,
139 20 + (rt2x00_rf(&rt2x00dev->chip, RF5225) || 139 20 + (rt2x00_rf(rt2x00dev, RF5225) ||
140 rt2x00_rf(&rt2x00dev->chip, RF2527))); 140 rt2x00_rf(rt2x00dev, RF2527)));
141 rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0); 141 rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0);
142 rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1); 142 rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1);
143 143
@@ -741,11 +741,9 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
741 741
742 rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg); 742 rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg);
743 743
744 if (rt2x00_rf(&rt2x00dev->chip, RF5226) || 744 if (rt2x00_rf(rt2x00dev, RF5226) || rt2x00_rf(rt2x00dev, RF5225))
745 rt2x00_rf(&rt2x00dev->chip, RF5225))
746 rt73usb_config_antenna_5x(rt2x00dev, ant); 745 rt73usb_config_antenna_5x(rt2x00dev, ant);
747 else if (rt2x00_rf(&rt2x00dev->chip, RF2528) || 746 else if (rt2x00_rf(rt2x00dev, RF2528) || rt2x00_rf(rt2x00dev, RF2527))
748 rt2x00_rf(&rt2x00dev->chip, RF2527))
749 rt73usb_config_antenna_2x(rt2x00dev, ant); 747 rt73usb_config_antenna_2x(rt2x00dev, ant);
750} 748}
751 749
@@ -779,8 +777,7 @@ static void rt73usb_config_channel(struct rt2x00_dev *rt2x00dev,
779 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); 777 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
780 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); 778 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
781 779
782 smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) || 780 smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527));
783 rt2x00_rf(&rt2x00dev->chip, RF2527));
784 781
785 rt73usb_bbp_read(rt2x00dev, 3, &r3); 782 rt73usb_bbp_read(rt2x00dev, 3, &r3);
786 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart); 783 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
@@ -1210,8 +1207,7 @@ static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev)
1210 rt2x00usb_register_write(rt2x00dev, SEC_CSR5, 0x00000000); 1207 rt2x00usb_register_write(rt2x00dev, SEC_CSR5, 0x00000000);
1211 1208
1212 reg = 0x000023b0; 1209 reg = 0x000023b0;
1213 if (rt2x00_rf(&rt2x00dev->chip, RF5225) || 1210 if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527))
1214 rt2x00_rf(&rt2x00dev->chip, RF2527))
1215 rt2x00_set_field32(&reg, PHY_CSR1_RF_RPI, 1); 1211 rt2x00_set_field32(&reg, PHY_CSR1_RF_RPI, 1);
1216 rt2x00usb_register_write(rt2x00dev, PHY_CSR1, reg); 1212 rt2x00usb_register_write(rt2x00dev, PHY_CSR1, reg);
1217 1213
@@ -1824,19 +1820,18 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1824 */ 1820 */
1825 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); 1821 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
1826 rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg); 1822 rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
1827 rt2x00_set_chip(rt2x00dev, RT2571, value, reg); 1823 rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
1828 rt2x00_print_chip(rt2x00dev); 1824 value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
1829 1825
1830 if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0x25730) || 1826 if (!rt2x00_rt(rt2x00dev, RT2573) || (rt2x00_rev(rt2x00dev) == 0)) {
1831 rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) {
1832 ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); 1827 ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
1833 return -ENODEV; 1828 return -ENODEV;
1834 } 1829 }
1835 1830
1836 if (!rt2x00_rf(&rt2x00dev->chip, RF5226) && 1831 if (!rt2x00_rf(rt2x00dev, RF5226) &&
1837 !rt2x00_rf(&rt2x00dev->chip, RF2528) && 1832 !rt2x00_rf(rt2x00dev, RF2528) &&
1838 !rt2x00_rf(&rt2x00dev->chip, RF5225) && 1833 !rt2x00_rf(rt2x00dev, RF5225) &&
1839 !rt2x00_rf(&rt2x00dev->chip, RF2527)) { 1834 !rt2x00_rf(rt2x00dev, RF2527)) {
1840 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 1835 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
1841 return -ENODEV; 1836 return -ENODEV;
1842 } 1837 }
@@ -2081,17 +2076,17 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2081 spec->supported_bands = SUPPORT_BAND_2GHZ; 2076 spec->supported_bands = SUPPORT_BAND_2GHZ;
2082 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 2077 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
2083 2078
2084 if (rt2x00_rf(&rt2x00dev->chip, RF2528)) { 2079 if (rt2x00_rf(rt2x00dev, RF2528)) {
2085 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528); 2080 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528);
2086 spec->channels = rf_vals_bg_2528; 2081 spec->channels = rf_vals_bg_2528;
2087 } else if (rt2x00_rf(&rt2x00dev->chip, RF5226)) { 2082 } else if (rt2x00_rf(rt2x00dev, RF5226)) {
2088 spec->supported_bands |= SUPPORT_BAND_5GHZ; 2083 spec->supported_bands |= SUPPORT_BAND_5GHZ;
2089 spec->num_channels = ARRAY_SIZE(rf_vals_5226); 2084 spec->num_channels = ARRAY_SIZE(rf_vals_5226);
2090 spec->channels = rf_vals_5226; 2085 spec->channels = rf_vals_5226;
2091 } else if (rt2x00_rf(&rt2x00dev->chip, RF2527)) { 2086 } else if (rt2x00_rf(rt2x00dev, RF2527)) {
2092 spec->num_channels = 14; 2087 spec->num_channels = 14;
2093 spec->channels = rf_vals_5225_2527; 2088 spec->channels = rf_vals_5225_2527;
2094 } else if (rt2x00_rf(&rt2x00dev->chip, RF5225)) { 2089 } else if (rt2x00_rf(rt2x00dev, RF5225)) {
2095 spec->supported_bands |= SUPPORT_BAND_5GHZ; 2090 spec->supported_bands |= SUPPORT_BAND_5GHZ;
2096 spec->num_channels = ARRAY_SIZE(rf_vals_5225_2527); 2091 spec->num_channels = ARRAY_SIZE(rf_vals_5225_2527);
2097 spec->channels = rf_vals_5225_2527; 2092 spec->channels = rf_vals_5225_2527;
@@ -2249,7 +2244,6 @@ static const struct ieee80211_ops rt73usb_mac80211_ops = {
2249 .get_stats = rt2x00mac_get_stats, 2244 .get_stats = rt2x00mac_get_stats,
2250 .bss_info_changed = rt2x00mac_bss_info_changed, 2245 .bss_info_changed = rt2x00mac_bss_info_changed,
2251 .conf_tx = rt73usb_conf_tx, 2246 .conf_tx = rt73usb_conf_tx,
2252 .get_tx_stats = rt2x00mac_get_tx_stats,
2253 .get_tsf = rt73usb_get_tsf, 2247 .get_tsf = rt73usb_get_tsf,
2254 .rfkill_poll = rt2x00mac_rfkill_poll, 2248 .rfkill_poll = rt2x00mac_rfkill_poll,
2255}; 2249};
@@ -2354,6 +2348,7 @@ static struct usb_device_id rt73usb_device_table[] = {
2354 { USB_DEVICE(0x08dd, 0x0120), USB_DEVICE_DATA(&rt73usb_ops) }, 2348 { USB_DEVICE(0x08dd, 0x0120), USB_DEVICE_DATA(&rt73usb_ops) },
2355 /* Buffalo */ 2349 /* Buffalo */
2356 { USB_DEVICE(0x0411, 0x00d8), USB_DEVICE_DATA(&rt73usb_ops) }, 2350 { USB_DEVICE(0x0411, 0x00d8), USB_DEVICE_DATA(&rt73usb_ops) },
2351 { USB_DEVICE(0x0411, 0x00d9), USB_DEVICE_DATA(&rt73usb_ops) },
2357 { USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) }, 2352 { USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) },
2358 { USB_DEVICE(0x0411, 0x0116), USB_DEVICE_DATA(&rt73usb_ops) }, 2353 { USB_DEVICE(0x0411, 0x0116), USB_DEVICE_DATA(&rt73usb_ops) },
2359 { USB_DEVICE(0x0411, 0x0119), USB_DEVICE_DATA(&rt73usb_ops) }, 2354 { USB_DEVICE(0x0411, 0x0119), USB_DEVICE_DATA(&rt73usb_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 7942f810e928..7abe7eb14555 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -142,6 +142,8 @@ struct hw_pairwise_ta_entry {
142 * MAC_CSR0: ASIC revision number. 142 * MAC_CSR0: ASIC revision number.
143 */ 143 */
144#define MAC_CSR0 0x3000 144#define MAC_CSR0 0x3000
145#define MAC_CSR0_REVISION FIELD32(0x0000000f)
146#define MAC_CSR0_CHIPSET FIELD32(0x000ffff0)
145 147
146/* 148/*
147 * MAC_CSR1: System control register. 149 * MAC_CSR1: System control register.
diff --git a/drivers/net/wireless/rtl818x/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180.h
index 8721282a8185..de3844fe06d8 100644
--- a/drivers/net/wireless/rtl818x/rtl8180.h
+++ b/drivers/net/wireless/rtl818x/rtl8180.h
@@ -60,7 +60,6 @@ struct rtl8180_priv {
60 struct rtl818x_csr __iomem *map; 60 struct rtl818x_csr __iomem *map;
61 const struct rtl818x_rf_ops *rf; 61 const struct rtl818x_rf_ops *rf;
62 struct ieee80211_vif *vif; 62 struct ieee80211_vif *vif;
63 int mode;
64 63
65 /* rtl8180 driver specific */ 64 /* rtl8180 driver specific */
66 spinlock_t lock; 65 spinlock_t lock;
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 8a40a1439984..2b928ecf47bd 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -33,7 +33,7 @@ MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
33MODULE_DESCRIPTION("RTL8180 / RTL8185 PCI wireless driver"); 33MODULE_DESCRIPTION("RTL8180 / RTL8185 PCI wireless driver");
34MODULE_LICENSE("GPL"); 34MODULE_LICENSE("GPL");
35 35
36static struct pci_device_id rtl8180_table[] __devinitdata = { 36static DEFINE_PCI_DEVICE_TABLE(rtl8180_table) = {
37 /* rtl8185 */ 37 /* rtl8185 */
38 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8185) }, 38 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8185) },
39 { PCI_DEVICE(PCI_VENDOR_ID_BELKIN, 0x700f) }, 39 { PCI_DEVICE(PCI_VENDOR_ID_BELKIN, 0x700f) },
@@ -82,8 +82,6 @@ static const struct ieee80211_channel rtl818x_channels[] = {
82}; 82};
83 83
84 84
85
86
87void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data) 85void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
88{ 86{
89 struct rtl8180_priv *priv = dev->priv; 87 struct rtl8180_priv *priv = dev->priv;
@@ -615,7 +613,6 @@ static int rtl8180_start(struct ieee80211_hw *dev)
615 reg |= RTL818X_CMD_TX_ENABLE; 613 reg |= RTL818X_CMD_TX_ENABLE;
616 rtl818x_iowrite8(priv, &priv->map->CMD, reg); 614 rtl818x_iowrite8(priv, &priv->map->CMD, reg);
617 615
618 priv->mode = NL80211_IFTYPE_MONITOR;
619 return 0; 616 return 0;
620 617
621 err_free_rings: 618 err_free_rings:
@@ -633,8 +630,6 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
633 u8 reg; 630 u8 reg;
634 int i; 631 int i;
635 632
636 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
637
638 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0); 633 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
639 634
640 reg = rtl818x_ioread8(priv, &priv->map->CMD); 635 reg = rtl818x_ioread8(priv, &priv->map->CMD);
@@ -657,38 +652,39 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
657} 652}
658 653
659static int rtl8180_add_interface(struct ieee80211_hw *dev, 654static int rtl8180_add_interface(struct ieee80211_hw *dev,
660 struct ieee80211_if_init_conf *conf) 655 struct ieee80211_vif *vif)
661{ 656{
662 struct rtl8180_priv *priv = dev->priv; 657 struct rtl8180_priv *priv = dev->priv;
663 658
664 if (priv->mode != NL80211_IFTYPE_MONITOR) 659 /*
665 return -EOPNOTSUPP; 660 * We only support one active interface at a time.
661 */
662 if (priv->vif)
663 return -EBUSY;
666 664
667 switch (conf->type) { 665 switch (vif->type) {
668 case NL80211_IFTYPE_STATION: 666 case NL80211_IFTYPE_STATION:
669 priv->mode = conf->type;
670 break; 667 break;
671 default: 668 default:
672 return -EOPNOTSUPP; 669 return -EOPNOTSUPP;
673 } 670 }
674 671
675 priv->vif = conf->vif; 672 priv->vif = vif;
676 673
677 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); 674 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
678 rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->MAC[0], 675 rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->MAC[0],
679 le32_to_cpu(*(__le32 *)conf->mac_addr)); 676 le32_to_cpu(*(__le32 *)vif->addr));
680 rtl818x_iowrite16(priv, (__le16 __iomem *)&priv->map->MAC[4], 677 rtl818x_iowrite16(priv, (__le16 __iomem *)&priv->map->MAC[4],
681 le16_to_cpu(*(__le16 *)(conf->mac_addr + 4))); 678 le16_to_cpu(*(__le16 *)(vif->addr + 4)));
682 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); 679 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
683 680
684 return 0; 681 return 0;
685} 682}
686 683
687static void rtl8180_remove_interface(struct ieee80211_hw *dev, 684static void rtl8180_remove_interface(struct ieee80211_hw *dev,
688 struct ieee80211_if_init_conf *conf) 685 struct ieee80211_vif *vif)
689{ 686{
690 struct rtl8180_priv *priv = dev->priv; 687 struct rtl8180_priv *priv = dev->priv;
691 priv->mode = NL80211_IFTYPE_MONITOR;
692 priv->vif = NULL; 688 priv->vif = NULL;
693} 689}
694 690
@@ -765,6 +761,14 @@ static void rtl8180_configure_filter(struct ieee80211_hw *dev,
765 rtl818x_iowrite32(priv, &priv->map->RX_CONF, priv->rx_conf); 761 rtl818x_iowrite32(priv, &priv->map->RX_CONF, priv->rx_conf);
766} 762}
767 763
764static u64 rtl8180_get_tsf(struct ieee80211_hw *dev)
765{
766 struct rtl8180_priv *priv = dev->priv;
767
768 return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
769 (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
770}
771
768static const struct ieee80211_ops rtl8180_ops = { 772static const struct ieee80211_ops rtl8180_ops = {
769 .tx = rtl8180_tx, 773 .tx = rtl8180_tx,
770 .start = rtl8180_start, 774 .start = rtl8180_start,
@@ -775,6 +779,7 @@ static const struct ieee80211_ops rtl8180_ops = {
775 .bss_info_changed = rtl8180_bss_info_changed, 779 .bss_info_changed = rtl8180_bss_info_changed,
776 .prepare_multicast = rtl8180_prepare_multicast, 780 .prepare_multicast = rtl8180_prepare_multicast,
777 .configure_filter = rtl8180_configure_filter, 781 .configure_filter = rtl8180_configure_filter,
782 .get_tsf = rtl8180_get_tsf,
778}; 783};
779 784
780static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom) 785static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom)
diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h
index 6af0f3f71f3a..6bb32112e65c 100644
--- a/drivers/net/wireless/rtl818x/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187.h
@@ -92,7 +92,7 @@ struct rtl8187_priv {
92 struct rtl818x_csr *map; 92 struct rtl818x_csr *map;
93 const struct rtl818x_rf_ops *rf; 93 const struct rtl818x_rf_ops *rf;
94 struct ieee80211_vif *vif; 94 struct ieee80211_vif *vif;
95 int mode; 95
96 /* The mutex protects the TX loopback state. 96 /* The mutex protects the TX loopback state.
97 * Any attempt to set channels concurrently locks the device. 97 * Any attempt to set channels concurrently locks the device.
98 */ 98 */
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 7ba3052b0708..0fb850e0c656 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -1019,31 +1019,30 @@ static void rtl8187_stop(struct ieee80211_hw *dev)
1019} 1019}
1020 1020
1021static int rtl8187_add_interface(struct ieee80211_hw *dev, 1021static int rtl8187_add_interface(struct ieee80211_hw *dev,
1022 struct ieee80211_if_init_conf *conf) 1022 struct ieee80211_vif *vif)
1023{ 1023{
1024 struct rtl8187_priv *priv = dev->priv; 1024 struct rtl8187_priv *priv = dev->priv;
1025 int i; 1025 int i;
1026 int ret = -EOPNOTSUPP; 1026 int ret = -EOPNOTSUPP;
1027 1027
1028 mutex_lock(&priv->conf_mutex); 1028 mutex_lock(&priv->conf_mutex);
1029 if (priv->mode != NL80211_IFTYPE_MONITOR) 1029 if (priv->vif)
1030 goto exit; 1030 goto exit;
1031 1031
1032 switch (conf->type) { 1032 switch (vif->type) {
1033 case NL80211_IFTYPE_STATION: 1033 case NL80211_IFTYPE_STATION:
1034 priv->mode = conf->type;
1035 break; 1034 break;
1036 default: 1035 default:
1037 goto exit; 1036 goto exit;
1038 } 1037 }
1039 1038
1040 ret = 0; 1039 ret = 0;
1041 priv->vif = conf->vif; 1040 priv->vif = vif;
1042 1041
1043 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); 1042 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
1044 for (i = 0; i < ETH_ALEN; i++) 1043 for (i = 0; i < ETH_ALEN; i++)
1045 rtl818x_iowrite8(priv, &priv->map->MAC[i], 1044 rtl818x_iowrite8(priv, &priv->map->MAC[i],
1046 ((u8 *)conf->mac_addr)[i]); 1045 ((u8 *)vif->addr)[i]);
1047 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); 1046 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
1048 1047
1049exit: 1048exit:
@@ -1052,11 +1051,10 @@ exit:
1052} 1051}
1053 1052
1054static void rtl8187_remove_interface(struct ieee80211_hw *dev, 1053static void rtl8187_remove_interface(struct ieee80211_hw *dev,
1055 struct ieee80211_if_init_conf *conf) 1054 struct ieee80211_vif *vif)
1056{ 1055{
1057 struct rtl8187_priv *priv = dev->priv; 1056 struct rtl8187_priv *priv = dev->priv;
1058 mutex_lock(&priv->conf_mutex); 1057 mutex_lock(&priv->conf_mutex);
1059 priv->mode = NL80211_IFTYPE_MONITOR;
1060 priv->vif = NULL; 1058 priv->vif = NULL;
1061 mutex_unlock(&priv->conf_mutex); 1059 mutex_unlock(&priv->conf_mutex);
1062} 1060}
@@ -1268,6 +1266,14 @@ static int rtl8187_conf_tx(struct ieee80211_hw *dev, u16 queue,
1268 return 0; 1266 return 0;
1269} 1267}
1270 1268
1269static u64 rtl8187_get_tsf(struct ieee80211_hw *dev)
1270{
1271 struct rtl8187_priv *priv = dev->priv;
1272
1273 return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
1274 (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
1275}
1276
1271static const struct ieee80211_ops rtl8187_ops = { 1277static const struct ieee80211_ops rtl8187_ops = {
1272 .tx = rtl8187_tx, 1278 .tx = rtl8187_tx,
1273 .start = rtl8187_start, 1279 .start = rtl8187_start,
@@ -1279,7 +1285,8 @@ static const struct ieee80211_ops rtl8187_ops = {
1279 .prepare_multicast = rtl8187_prepare_multicast, 1285 .prepare_multicast = rtl8187_prepare_multicast,
1280 .configure_filter = rtl8187_configure_filter, 1286 .configure_filter = rtl8187_configure_filter,
1281 .conf_tx = rtl8187_conf_tx, 1287 .conf_tx = rtl8187_conf_tx,
1282 .rfkill_poll = rtl8187_rfkill_poll 1288 .rfkill_poll = rtl8187_rfkill_poll,
1289 .get_tsf = rtl8187_get_tsf,
1283}; 1290};
1284 1291
1285static void rtl8187_eeprom_register_read(struct eeprom_93cx6 *eeprom) 1292static void rtl8187_eeprom_register_read(struct eeprom_93cx6 *eeprom)
@@ -1366,7 +1373,6 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1366 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; 1373 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
1367 1374
1368 1375
1369 priv->mode = NL80211_IFTYPE_MONITOR;
1370 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 1376 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1371 IEEE80211_HW_SIGNAL_DBM | 1377 IEEE80211_HW_SIGNAL_DBM |
1372 IEEE80211_HW_RX_INCLUDES_FCS; 1378 IEEE80211_HW_RX_INCLUDES_FCS;
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.c b/drivers/net/wireless/rtl818x/rtl8187_leds.c
index ded44c045eb2..4637337d5ce6 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_leds.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_leds.c
@@ -33,7 +33,7 @@ static void led_turn_on(struct work_struct *work)
33 struct rtl8187_led *led = &priv->led_tx; 33 struct rtl8187_led *led = &priv->led_tx;
34 34
35 /* Don't change the LED, when the device is down. */ 35 /* Don't change the LED, when the device is down. */
36 if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) 36 if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED)
37 return ; 37 return ;
38 38
39 /* Skip if the LED is not registered. */ 39 /* Skip if the LED is not registered. */
@@ -71,7 +71,7 @@ static void led_turn_off(struct work_struct *work)
71 struct rtl8187_led *led = &priv->led_tx; 71 struct rtl8187_led *led = &priv->led_tx;
72 72
73 /* Don't change the LED, when the device is down. */ 73 /* Don't change the LED, when the device is down. */
74 if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) 74 if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED)
75 return ; 75 return ;
76 76
77 /* Skip if the LED is not registered. */ 77 /* Skip if the LED is not registered. */
@@ -241,5 +241,5 @@ void rtl8187_leds_exit(struct ieee80211_hw *dev)
241 cancel_delayed_work_sync(&priv->led_off); 241 cancel_delayed_work_sync(&priv->led_off);
242 cancel_delayed_work_sync(&priv->led_on); 242 cancel_delayed_work_sync(&priv->led_on);
243} 243}
244#endif /* def CONFIG_RTL8187_LED */ 244#endif /* def CONFIG_RTL8187_LEDS */
245 245
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.h b/drivers/net/wireless/rtl818x/rtl8187_leds.h
index efe8041bdda4..d743c96d4a20 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_leds.h
+++ b/drivers/net/wireless/rtl818x/rtl8187_leds.h
@@ -54,6 +54,6 @@ struct rtl8187_led {
54void rtl8187_leds_init(struct ieee80211_hw *dev, u16 code); 54void rtl8187_leds_init(struct ieee80211_hw *dev, u16 code);
55void rtl8187_leds_exit(struct ieee80211_hw *dev); 55void rtl8187_leds_exit(struct ieee80211_hw *dev);
56 56
57#endif /* def CONFIG_RTL8187_LED */ 57#endif /* def CONFIG_RTL8187_LEDS */
58 58
59#endif /* RTL8187_LED_H */ 59#endif /* RTL8187_LED_H */
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
index 62e37ad01cc0..f47ec94c16dc 100644
--- a/drivers/net/wireless/wl12xx/Makefile
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -10,5 +10,7 @@ obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o
10wl1271-objs = wl1271_main.o wl1271_spi.o wl1271_cmd.o \ 10wl1271-objs = wl1271_main.o wl1271_spi.o wl1271_cmd.o \
11 wl1271_event.o wl1271_tx.o wl1271_rx.o \ 11 wl1271_event.o wl1271_tx.o wl1271_rx.o \
12 wl1271_ps.o wl1271_acx.o wl1271_boot.o \ 12 wl1271_ps.o wl1271_acx.o wl1271_boot.o \
13 wl1271_init.o wl1271_debugfs.o 13 wl1271_init.o wl1271_debugfs.o wl1271_io.o
14
15wl1271-$(CONFIG_NL80211_TESTMODE) += wl1271_testmode.o
14obj-$(CONFIG_WL1271) += wl1271.o 16obj-$(CONFIG_WL1271) += wl1271.o
diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl12xx/wl1251.h
index 054533f7a124..37c61c19cae5 100644
--- a/drivers/net/wireless/wl12xx/wl1251.h
+++ b/drivers/net/wireless/wl12xx/wl1251.h
@@ -247,6 +247,7 @@ struct wl1251_debugfs {
247 struct dentry *rxpipe_tx_xfr_host_int_trig_rx_data; 247 struct dentry *rxpipe_tx_xfr_host_int_trig_rx_data;
248 248
249 struct dentry *tx_queue_len; 249 struct dentry *tx_queue_len;
250 struct dentry *tx_queue_status;
250 251
251 struct dentry *retry_count; 252 struct dentry *retry_count;
252 struct dentry *excessive_retries; 253 struct dentry *excessive_retries;
@@ -340,9 +341,6 @@ struct wl1251 {
340 /* Are we currently scanning */ 341 /* Are we currently scanning */
341 bool scanning; 342 bool scanning;
342 343
343 /* Our association ID */
344 u16 aid;
345
346 /* Default key (for WEP) */ 344 /* Default key (for WEP) */
347 u32 default_key; 345 u32 default_key;
348 346
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.c b/drivers/net/wireless/wl12xx/wl1251_acx.c
index acfa086dbfc5..beff084040b5 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.c
@@ -976,3 +976,72 @@ out:
976 kfree(acx); 976 kfree(acx);
977 return ret; 977 return ret;
978} 978}
979
980int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
981 u8 aifs, u16 txop)
982{
983 struct wl1251_acx_ac_cfg *acx;
984 int ret = 0;
985
986 wl1251_debug(DEBUG_ACX, "acx ac cfg %d cw_ming %d cw_max %d "
987 "aifs %d txop %d", ac, cw_min, cw_max, aifs, txop);
988
989 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
990
991 if (!acx) {
992 ret = -ENOMEM;
993 goto out;
994 }
995
996 acx->ac = ac;
997 acx->cw_min = cw_min;
998 acx->cw_max = cw_max;
999 acx->aifsn = aifs;
1000 acx->txop_limit = txop;
1001
1002 ret = wl1251_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx));
1003 if (ret < 0) {
1004 wl1251_warning("acx ac cfg failed: %d", ret);
1005 goto out;
1006 }
1007
1008out:
1009 kfree(acx);
1010 return ret;
1011}
1012
1013int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
1014 enum wl1251_acx_channel_type type,
1015 u8 tsid, enum wl1251_acx_ps_scheme ps_scheme,
1016 enum wl1251_acx_ack_policy ack_policy)
1017{
1018 struct wl1251_acx_tid_cfg *acx;
1019 int ret = 0;
1020
1021 wl1251_debug(DEBUG_ACX, "acx tid cfg %d type %d tsid %d "
1022 "ps_scheme %d ack_policy %d", queue, type, tsid,
1023 ps_scheme, ack_policy);
1024
1025 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1026
1027 if (!acx) {
1028 ret = -ENOMEM;
1029 goto out;
1030 }
1031
1032 acx->queue = queue;
1033 acx->type = type;
1034 acx->tsid = tsid;
1035 acx->ps_scheme = ps_scheme;
1036 acx->ack_policy = ack_policy;
1037
1038 ret = wl1251_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx));
1039 if (ret < 0) {
1040 wl1251_warning("acx tid cfg failed: %d", ret);
1041 goto out;
1042 }
1043
1044out:
1045 kfree(acx);
1046 return ret;
1047}
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.h b/drivers/net/wireless/wl12xx/wl1251_acx.h
index 652371432cd8..26160c45784c 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.h
@@ -1166,6 +1166,87 @@ struct wl1251_acx_wr_tbtt_and_dtim {
1166 u8 padding; 1166 u8 padding;
1167} __attribute__ ((packed)); 1167} __attribute__ ((packed));
1168 1168
1169struct wl1251_acx_ac_cfg {
1170 struct acx_header header;
1171
1172 /*
1173 * Access Category - The TX queue's access category
1174 * (refer to AccessCategory_enum)
1175 */
1176 u8 ac;
1177
1178 /*
1179 * The contention window minimum size (in slots) for
1180 * the access class.
1181 */
1182 u8 cw_min;
1183
1184 /*
1185 * The contention window maximum size (in slots) for
1186 * the access class.
1187 */
1188 u16 cw_max;
1189
1190 /* The AIF value (in slots) for the access class. */
1191 u8 aifsn;
1192
1193 u8 reserved;
1194
1195 /* The TX Op Limit (in microseconds) for the access class. */
1196 u16 txop_limit;
1197} __attribute__ ((packed));
1198
1199
1200enum wl1251_acx_channel_type {
1201 CHANNEL_TYPE_DCF = 0,
1202 CHANNEL_TYPE_EDCF = 1,
1203 CHANNEL_TYPE_HCCA = 2,
1204};
1205
1206enum wl1251_acx_ps_scheme {
1207 /* regular ps: simple sending of packets */
1208 WL1251_ACX_PS_SCHEME_LEGACY = 0,
1209
1210 /* sending a packet triggers a unscheduled apsd downstream */
1211 WL1251_ACX_PS_SCHEME_UPSD_TRIGGER = 1,
1212
1213 /* a pspoll packet will be sent before every data packet */
1214 WL1251_ACX_PS_SCHEME_LEGACY_PSPOLL = 2,
1215
1216 /* scheduled apsd mode */
1217 WL1251_ACX_PS_SCHEME_SAPSD = 3,
1218};
1219
1220enum wl1251_acx_ack_policy {
1221 WL1251_ACX_ACK_POLICY_LEGACY = 0,
1222 WL1251_ACX_ACK_POLICY_NO_ACK = 1,
1223 WL1251_ACX_ACK_POLICY_BLOCK = 2,
1224};
1225
1226struct wl1251_acx_tid_cfg {
1227 struct acx_header header;
1228
1229 /* tx queue id number (0-7) */
1230 u8 queue;
1231
1232 /* channel access type for the queue, enum wl1251_acx_channel_type */
1233 u8 type;
1234
1235 /* EDCA: ac index (0-3), HCCA: traffic stream id (8-15) */
1236 u8 tsid;
1237
1238 /* ps scheme of the specified queue, enum wl1251_acx_ps_scheme */
1239 u8 ps_scheme;
1240
1241 /* the tx queue ack policy, enum wl1251_acx_ack_policy */
1242 u8 ack_policy;
1243
1244 u8 padding[3];
1245
1246 /* not supported */
1247 u32 apsdconf[2];
1248} __attribute__ ((packed));
1249
1169/************************************************************************* 1250/*************************************************************************
1170 1251
1171 Host Interrupt Register (WiLink -> Host) 1252 Host Interrupt Register (WiLink -> Host)
@@ -1322,5 +1403,11 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime);
1322int wl1251_acx_rate_policies(struct wl1251 *wl); 1403int wl1251_acx_rate_policies(struct wl1251 *wl);
1323int wl1251_acx_mem_cfg(struct wl1251 *wl); 1404int wl1251_acx_mem_cfg(struct wl1251 *wl);
1324int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim); 1405int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim);
1406int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
1407 u8 aifs, u16 txop);
1408int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
1409 enum wl1251_acx_channel_type type,
1410 u8 tsid, enum wl1251_acx_ps_scheme ps_scheme,
1411 enum wl1251_acx_ack_policy ack_policy);
1325 1412
1326#endif /* __WL1251_ACX_H__ */ 1413#endif /* __WL1251_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl12xx/wl1251_cmd.c
index 770f260726bd..0320b478bb3f 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c
@@ -410,3 +410,86 @@ out:
410 kfree(cmd); 410 kfree(cmd);
411 return ret; 411 return ret;
412} 412}
413
414int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
415 struct ieee80211_channel *channels[],
416 unsigned int n_channels, unsigned int n_probes)
417{
418 struct wl1251_cmd_scan *cmd;
419 int i, ret = 0;
420
421 wl1251_debug(DEBUG_CMD, "cmd scan");
422
423 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
424 if (!cmd)
425 return -ENOMEM;
426
427 cmd->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
428 cmd->params.rx_filter_options = cpu_to_le32(CFG_RX_PRSP_EN |
429 CFG_RX_MGMT_EN |
430 CFG_RX_BCN_EN);
431 cmd->params.scan_options = 0;
432 cmd->params.num_channels = n_channels;
433 cmd->params.num_probe_requests = n_probes;
434 cmd->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
435 cmd->params.tid_trigger = 0;
436
437 for (i = 0; i < n_channels; i++) {
438 cmd->channels[i].min_duration =
439 cpu_to_le32(WL1251_SCAN_MIN_DURATION);
440 cmd->channels[i].max_duration =
441 cpu_to_le32(WL1251_SCAN_MAX_DURATION);
442 memset(&cmd->channels[i].bssid_lsb, 0xff, 4);
443 memset(&cmd->channels[i].bssid_msb, 0xff, 2);
444 cmd->channels[i].early_termination = 0;
445 cmd->channels[i].tx_power_att = 0;
446 cmd->channels[i].channel = channels[i]->hw_value;
447 }
448
449 cmd->params.ssid_len = ssid_len;
450 if (ssid)
451 memcpy(cmd->params.ssid, ssid, ssid_len);
452
453 ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
454 if (ret < 0) {
455 wl1251_error("cmd scan failed: %d", ret);
456 goto out;
457 }
458
459 wl1251_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd));
460
461 if (cmd->header.status != CMD_STATUS_SUCCESS) {
462 wl1251_error("cmd scan status wasn't success: %d",
463 cmd->header.status);
464 ret = -EIO;
465 goto out;
466 }
467
468out:
469 kfree(cmd);
470 return ret;
471}
472
473int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout)
474{
475 struct wl1251_cmd_trigger_scan_to *cmd;
476 int ret;
477
478 wl1251_debug(DEBUG_CMD, "cmd trigger scan to");
479
480 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
481 if (!cmd)
482 return -ENOMEM;
483
484 cmd->timeout = timeout;
485
486 ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
487 if (ret < 0) {
488 wl1251_error("cmd trigger scan to failed: %d", ret);
489 goto out;
490 }
491
492out:
493 kfree(cmd);
494 return ret;
495}
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.h b/drivers/net/wireless/wl12xx/wl1251_cmd.h
index dff798ad0ef5..4ad67cae94d2 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.h
@@ -27,6 +27,8 @@
27 27
28#include "wl1251.h" 28#include "wl1251.h"
29 29
30#include <net/cfg80211.h>
31
30struct acx_header; 32struct acx_header;
31 33
32int wl1251_cmd_send(struct wl1251 *wl, u16 type, void *buf, size_t buf_len); 34int wl1251_cmd_send(struct wl1251 *wl, u16 type, void *buf, size_t buf_len);
@@ -43,6 +45,10 @@ int wl1251_cmd_read_memory(struct wl1251 *wl, u32 addr, void *answer,
43 size_t len); 45 size_t len);
44int wl1251_cmd_template_set(struct wl1251 *wl, u16 cmd_id, 46int wl1251_cmd_template_set(struct wl1251 *wl, u16 cmd_id,
45 void *buf, size_t buf_len); 47 void *buf, size_t buf_len);
48int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
49 struct ieee80211_channel *channels[],
50 unsigned int n_channels, unsigned int n_probes);
51int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout);
46 52
47/* unit ms */ 53/* unit ms */
48#define WL1251_COMMAND_TIMEOUT 2000 54#define WL1251_COMMAND_TIMEOUT 2000
@@ -163,8 +169,12 @@ struct cmd_read_write_memory {
163#define CMDMBOX_HEADER_LEN 4 169#define CMDMBOX_HEADER_LEN 4
164#define CMDMBOX_INFO_ELEM_HEADER_LEN 4 170#define CMDMBOX_INFO_ELEM_HEADER_LEN 4
165 171
172#define WL1251_SCAN_MIN_DURATION 30000
173#define WL1251_SCAN_MAX_DURATION 60000
174
175#define WL1251_SCAN_NUM_PROBES 3
166 176
167struct basic_scan_parameters { 177struct wl1251_scan_parameters {
168 u32 rx_config_options; 178 u32 rx_config_options;
169 u32 rx_filter_options; 179 u32 rx_filter_options;
170 180
@@ -189,11 +199,11 @@ struct basic_scan_parameters {
189 199
190 u8 tid_trigger; 200 u8 tid_trigger;
191 u8 ssid_len; 201 u8 ssid_len;
192 u32 ssid[8]; 202 u8 ssid[32];
193 203
194} __attribute__ ((packed)); 204} __attribute__ ((packed));
195 205
196struct basic_scan_channel_parameters { 206struct wl1251_scan_ch_parameters {
197 u32 min_duration; /* in TU */ 207 u32 min_duration; /* in TU */
198 u32 max_duration; /* in TU */ 208 u32 max_duration; /* in TU */
199 u32 bssid_lsb; 209 u32 bssid_lsb;
@@ -213,11 +223,11 @@ struct basic_scan_channel_parameters {
213/* SCAN parameters */ 223/* SCAN parameters */
214#define SCAN_MAX_NUM_OF_CHANNELS 16 224#define SCAN_MAX_NUM_OF_CHANNELS 16
215 225
216struct cmd_scan { 226struct wl1251_cmd_scan {
217 struct wl1251_cmd_header header; 227 struct wl1251_cmd_header header;
218 228
219 struct basic_scan_parameters params; 229 struct wl1251_scan_parameters params;
220 struct basic_scan_channel_parameters channels[SCAN_MAX_NUM_OF_CHANNELS]; 230 struct wl1251_scan_ch_parameters channels[SCAN_MAX_NUM_OF_CHANNELS];
221} __attribute__ ((packed)); 231} __attribute__ ((packed));
222 232
223enum { 233enum {
diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.c b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
index a00723059f83..0ccba57fb9fb 100644
--- a/drivers/net/wireless/wl12xx/wl1251_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
@@ -237,6 +237,27 @@ static const struct file_operations tx_queue_len_ops = {
237 .open = wl1251_open_file_generic, 237 .open = wl1251_open_file_generic,
238}; 238};
239 239
240static ssize_t tx_queue_status_read(struct file *file, char __user *userbuf,
241 size_t count, loff_t *ppos)
242{
243 struct wl1251 *wl = file->private_data;
244 char buf[3], status;
245 int len;
246
247 if (wl->tx_queue_stopped)
248 status = 's';
249 else
250 status = 'r';
251
252 len = scnprintf(buf, sizeof(buf), "%c\n", status);
253 return simple_read_from_buffer(userbuf, count, ppos, buf, len);
254}
255
256static const struct file_operations tx_queue_status_ops = {
257 .read = tx_queue_status_read,
258 .open = wl1251_open_file_generic,
259};
260
240static void wl1251_debugfs_delete_files(struct wl1251 *wl) 261static void wl1251_debugfs_delete_files(struct wl1251 *wl)
241{ 262{
242 DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow); 263 DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow);
@@ -331,6 +352,7 @@ static void wl1251_debugfs_delete_files(struct wl1251 *wl)
331 DEBUGFS_FWSTATS_DEL(rxpipe, tx_xfr_host_int_trig_rx_data); 352 DEBUGFS_FWSTATS_DEL(rxpipe, tx_xfr_host_int_trig_rx_data);
332 353
333 DEBUGFS_DEL(tx_queue_len); 354 DEBUGFS_DEL(tx_queue_len);
355 DEBUGFS_DEL(tx_queue_status);
334 DEBUGFS_DEL(retry_count); 356 DEBUGFS_DEL(retry_count);
335 DEBUGFS_DEL(excessive_retries); 357 DEBUGFS_DEL(excessive_retries);
336} 358}
@@ -431,6 +453,7 @@ static int wl1251_debugfs_add_files(struct wl1251 *wl)
431 DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data); 453 DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
432 454
433 DEBUGFS_ADD(tx_queue_len, wl->debugfs.rootdir); 455 DEBUGFS_ADD(tx_queue_len, wl->debugfs.rootdir);
456 DEBUGFS_ADD(tx_queue_status, wl->debugfs.rootdir);
434 DEBUGFS_ADD(retry_count, wl->debugfs.rootdir); 457 DEBUGFS_ADD(retry_count, wl->debugfs.rootdir);
435 DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir); 458 DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir);
436 459
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.c b/drivers/net/wireless/wl12xx/wl1251_init.c
index 5cb573383eeb..5aad56ea7153 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.c
+++ b/drivers/net/wireless/wl12xx/wl1251_init.c
@@ -294,6 +294,11 @@ static int wl1251_hw_init_tx_queue_config(struct wl1251 *wl)
294 goto out; 294 goto out;
295 } 295 }
296 296
297 wl1251_acx_ac_cfg(wl, AC_BE, CWMIN_BE, CWMAX_BE, AIFS_DIFS, TXOP_BE);
298 wl1251_acx_ac_cfg(wl, AC_BK, CWMIN_BK, CWMAX_BK, AIFS_DIFS, TXOP_BK);
299 wl1251_acx_ac_cfg(wl, AC_VI, CWMIN_VI, CWMAX_VI, AIFS_DIFS, TXOP_VI);
300 wl1251_acx_ac_cfg(wl, AC_VO, CWMIN_VO, CWMAX_VO, AIFS_DIFS, TXOP_VO);
301
297out: 302out:
298 kfree(config); 303 kfree(config);
299 return ret; 304 return ret;
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.h b/drivers/net/wireless/wl12xx/wl1251_init.h
index b3b25ec885ea..269cefb3e7d4 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.h
+++ b/drivers/net/wireless/wl12xx/wl1251_init.h
@@ -26,6 +26,53 @@
26 26
27#include "wl1251.h" 27#include "wl1251.h"
28 28
29enum {
30 /* best effort/legacy */
31 AC_BE = 0,
32
33 /* background */
34 AC_BK = 1,
35
36 /* video */
37 AC_VI = 2,
38
39 /* voice */
40 AC_VO = 3,
41
42 /* broadcast dummy access category */
43 AC_BCAST = 4,
44
45 NUM_ACCESS_CATEGORIES = 4
46};
47
48/* following are defult values for the IE fields*/
49#define CWMIN_BK 15
50#define CWMIN_BE 15
51#define CWMIN_VI 7
52#define CWMIN_VO 3
53#define CWMAX_BK 1023
54#define CWMAX_BE 63
55#define CWMAX_VI 15
56#define CWMAX_VO 7
57
58/* slot number setting to start transmission at PIFS interval */
59#define AIFS_PIFS 1
60
61/*
62 * slot number setting to start transmission at DIFS interval - normal DCF
63 * access
64 */
65#define AIFS_DIFS 2
66
67#define AIFSN_BK 7
68#define AIFSN_BE 3
69#define AIFSN_VI AIFS_PIFS
70#define AIFSN_VO AIFS_PIFS
71#define TXOP_BK 0
72#define TXOP_BE 0
73#define TXOP_VI 3008
74#define TXOP_VO 1504
75
29int wl1251_hw_init_hwenc_config(struct wl1251 *wl); 76int wl1251_hw_init_hwenc_config(struct wl1251 *wl);
30int wl1251_hw_init_templates_config(struct wl1251 *wl); 77int wl1251_hw_init_templates_config(struct wl1251 *wl);
31int wl1251_hw_init_rx_config(struct wl1251 *wl, u32 config, u32 filter); 78int wl1251_hw_init_rx_config(struct wl1251 *wl, u32 config, u32 filter);
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 2f50a256efa5..24ae6a360ac8 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -395,6 +395,7 @@ static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
395 * the queue here, otherwise the queue will get too long. 395 * the queue here, otherwise the queue will get too long.
396 */ 396 */
397 if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_MAX_LENGTH) { 397 if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_MAX_LENGTH) {
398 wl1251_debug(DEBUG_TX, "op_tx: tx_queue full, stop queues");
398 ieee80211_stop_queues(wl->hw); 399 ieee80211_stop_queues(wl->hw);
399 400
400 /* 401 /*
@@ -510,13 +511,13 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
510} 511}
511 512
512static int wl1251_op_add_interface(struct ieee80211_hw *hw, 513static int wl1251_op_add_interface(struct ieee80211_hw *hw,
513 struct ieee80211_if_init_conf *conf) 514 struct ieee80211_vif *vif)
514{ 515{
515 struct wl1251 *wl = hw->priv; 516 struct wl1251 *wl = hw->priv;
516 int ret = 0; 517 int ret = 0;
517 518
518 wl1251_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", 519 wl1251_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
519 conf->type, conf->mac_addr); 520 vif->type, vif->addr);
520 521
521 mutex_lock(&wl->mutex); 522 mutex_lock(&wl->mutex);
522 if (wl->vif) { 523 if (wl->vif) {
@@ -524,9 +525,9 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
524 goto out; 525 goto out;
525 } 526 }
526 527
527 wl->vif = conf->vif; 528 wl->vif = vif;
528 529
529 switch (conf->type) { 530 switch (vif->type) {
530 case NL80211_IFTYPE_STATION: 531 case NL80211_IFTYPE_STATION:
531 wl->bss_type = BSS_TYPE_STA_BSS; 532 wl->bss_type = BSS_TYPE_STA_BSS;
532 break; 533 break;
@@ -538,8 +539,8 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
538 goto out; 539 goto out;
539 } 540 }
540 541
541 if (memcmp(wl->mac_addr, conf->mac_addr, ETH_ALEN)) { 542 if (memcmp(wl->mac_addr, vif->addr, ETH_ALEN)) {
542 memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN); 543 memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
543 SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr); 544 SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
544 ret = wl1251_acx_station_id(wl); 545 ret = wl1251_acx_station_id(wl);
545 if (ret < 0) 546 if (ret < 0)
@@ -552,7 +553,7 @@ out:
552} 553}
553 554
554static void wl1251_op_remove_interface(struct ieee80211_hw *hw, 555static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
555 struct ieee80211_if_init_conf *conf) 556 struct ieee80211_vif *vif)
556{ 557{
557 struct wl1251 *wl = hw->priv; 558 struct wl1251 *wl = hw->priv;
558 559
@@ -562,43 +563,25 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
562 mutex_unlock(&wl->mutex); 563 mutex_unlock(&wl->mutex);
563} 564}
564 565
565static int wl1251_build_null_data(struct wl1251 *wl) 566static int wl1251_build_qos_null_data(struct wl1251 *wl)
566{ 567{
567 struct wl12xx_null_data_template template; 568 struct ieee80211_qos_hdr template;
568 569
569 if (!is_zero_ether_addr(wl->bssid)) { 570 memset(&template, 0, sizeof(template));
570 memcpy(template.header.da, wl->bssid, ETH_ALEN);
571 memcpy(template.header.bssid, wl->bssid, ETH_ALEN);
572 } else {
573 memset(template.header.da, 0xff, ETH_ALEN);
574 memset(template.header.bssid, 0xff, ETH_ALEN);
575 }
576
577 memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
578 template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
579 IEEE80211_STYPE_NULLFUNC |
580 IEEE80211_FCTL_TODS);
581
582 return wl1251_cmd_template_set(wl, CMD_NULL_DATA, &template,
583 sizeof(template));
584
585}
586
587static int wl1251_build_ps_poll(struct wl1251 *wl, u16 aid)
588{
589 struct wl12xx_ps_poll_template template;
590 571
591 memcpy(template.bssid, wl->bssid, ETH_ALEN); 572 memcpy(template.addr1, wl->bssid, ETH_ALEN);
592 memcpy(template.ta, wl->mac_addr, ETH_ALEN); 573 memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
574 memcpy(template.addr3, wl->bssid, ETH_ALEN);
593 575
594 /* aid in PS-Poll has its two MSBs each set to 1 */ 576 template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
595 template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid); 577 IEEE80211_STYPE_QOS_NULLFUNC |
578 IEEE80211_FCTL_TODS);
596 579
597 template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); 580 /* FIXME: not sure what priority to use here */
581 template.qos_ctrl = cpu_to_le16(0);
598 582
599 return wl1251_cmd_template_set(wl, CMD_PS_POLL, &template, 583 return wl1251_cmd_template_set(wl, CMD_QOS_NULL_DATA, &template,
600 sizeof(template)); 584 sizeof(template));
601
602} 585}
603 586
604static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed) 587static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
@@ -634,26 +617,34 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
634 617
635 wl->psm_requested = true; 618 wl->psm_requested = true;
636 619
620 wl->dtim_period = conf->ps_dtim_period;
621
622 ret = wl1251_acx_wr_tbtt_and_dtim(wl, wl->beacon_int,
623 wl->dtim_period);
624
637 /* 625 /*
638 * We enter PSM only if we're already associated. 626 * mac80211 enables PSM only if we're already associated.
639 * If we're not, we'll enter it when joining an SSID,
640 * through the bss_info_changed() hook.
641 */ 627 */
642 ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE); 628 ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
629 if (ret < 0)
630 goto out_sleep;
643 } else if (!(conf->flags & IEEE80211_CONF_PS) && 631 } else if (!(conf->flags & IEEE80211_CONF_PS) &&
644 wl->psm_requested) { 632 wl->psm_requested) {
645 wl1251_debug(DEBUG_PSM, "psm disabled"); 633 wl1251_debug(DEBUG_PSM, "psm disabled");
646 634
647 wl->psm_requested = false; 635 wl->psm_requested = false;
648 636
649 if (wl->psm) 637 if (wl->psm) {
650 ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE); 638 ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
639 if (ret < 0)
640 goto out_sleep;
641 }
651 } 642 }
652 643
653 if (conf->power_level != wl->power_level) { 644 if (conf->power_level != wl->power_level) {
654 ret = wl1251_acx_tx_power(wl, conf->power_level); 645 ret = wl1251_acx_tx_power(wl, conf->power_level);
655 if (ret < 0) 646 if (ret < 0)
656 goto out; 647 goto out_sleep;
657 648
658 wl->power_level = conf->power_level; 649 wl->power_level = conf->power_level;
659 } 650 }
@@ -864,199 +855,61 @@ out:
864 return ret; 855 return ret;
865} 856}
866 857
867static int wl1251_build_basic_rates(char *rates) 858static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
868{ 859 struct cfg80211_scan_request *req)
869 u8 index = 0;
870
871 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
872 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
873 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
874 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
875
876 return index;
877}
878
879static int wl1251_build_extended_rates(char *rates)
880{ 860{
881 u8 index = 0; 861 struct wl1251 *wl = hw->priv;
882 862 struct sk_buff *skb;
883 rates[index++] = IEEE80211_OFDM_RATE_6MB; 863 size_t ssid_len = 0;
884 rates[index++] = IEEE80211_OFDM_RATE_9MB; 864 u8 *ssid = NULL;
885 rates[index++] = IEEE80211_OFDM_RATE_12MB; 865 int ret;
886 rates[index++] = IEEE80211_OFDM_RATE_18MB;
887 rates[index++] = IEEE80211_OFDM_RATE_24MB;
888 rates[index++] = IEEE80211_OFDM_RATE_36MB;
889 rates[index++] = IEEE80211_OFDM_RATE_48MB;
890 rates[index++] = IEEE80211_OFDM_RATE_54MB;
891
892 return index;
893}
894
895 866
896static int wl1251_build_probe_req(struct wl1251 *wl, u8 *ssid, size_t ssid_len) 867 wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan");
897{
898 struct wl12xx_probe_req_template template;
899 struct wl12xx_ie_rates *rates;
900 char *ptr;
901 u16 size;
902
903 ptr = (char *)&template;
904 size = sizeof(struct ieee80211_header);
905
906 memset(template.header.da, 0xff, ETH_ALEN);
907 memset(template.header.bssid, 0xff, ETH_ALEN);
908 memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
909 template.header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
910
911 /* IEs */
912 /* SSID */
913 template.ssid.header.id = WLAN_EID_SSID;
914 template.ssid.header.len = ssid_len;
915 if (ssid_len && ssid)
916 memcpy(template.ssid.ssid, ssid, ssid_len);
917 size += sizeof(struct wl12xx_ie_header) + ssid_len;
918 ptr += size;
919
920 /* Basic Rates */
921 rates = (struct wl12xx_ie_rates *)ptr;
922 rates->header.id = WLAN_EID_SUPP_RATES;
923 rates->header.len = wl1251_build_basic_rates(rates->rates);
924 size += sizeof(struct wl12xx_ie_header) + rates->header.len;
925 ptr += sizeof(struct wl12xx_ie_header) + rates->header.len;
926
927 /* Extended rates */
928 rates = (struct wl12xx_ie_rates *)ptr;
929 rates->header.id = WLAN_EID_EXT_SUPP_RATES;
930 rates->header.len = wl1251_build_extended_rates(rates->rates);
931 size += sizeof(struct wl12xx_ie_header) + rates->header.len;
932
933 wl1251_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size);
934
935 return wl1251_cmd_template_set(wl, CMD_PROBE_REQ, &template,
936 size);
937}
938 868
939static int wl1251_hw_scan(struct wl1251 *wl, u8 *ssid, size_t len, 869 if (req->n_ssids) {
940 u8 active_scan, u8 high_prio, u8 num_channels, 870 ssid = req->ssids[0].ssid;
941 u8 probe_requests) 871 ssid_len = req->ssids[0].ssid_len;
942{
943 struct wl1251_cmd_trigger_scan_to *trigger = NULL;
944 struct cmd_scan *params = NULL;
945 int i, ret;
946 u16 scan_options = 0;
947
948 if (wl->scanning)
949 return -EINVAL;
950
951 params = kzalloc(sizeof(*params), GFP_KERNEL);
952 if (!params)
953 return -ENOMEM;
954
955 params->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
956 params->params.rx_filter_options =
957 cpu_to_le32(CFG_RX_PRSP_EN | CFG_RX_MGMT_EN | CFG_RX_BCN_EN);
958
959 /* High priority scan */
960 if (!active_scan)
961 scan_options |= SCAN_PASSIVE;
962 if (high_prio)
963 scan_options |= SCAN_PRIORITY_HIGH;
964 params->params.scan_options = scan_options;
965
966 params->params.num_channels = num_channels;
967 params->params.num_probe_requests = probe_requests;
968 params->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
969 params->params.tid_trigger = 0;
970
971 for (i = 0; i < num_channels; i++) {
972 params->channels[i].min_duration = cpu_to_le32(30000);
973 params->channels[i].max_duration = cpu_to_le32(60000);
974 memset(&params->channels[i].bssid_lsb, 0xff, 4);
975 memset(&params->channels[i].bssid_msb, 0xff, 2);
976 params->channels[i].early_termination = 0;
977 params->channels[i].tx_power_att = 0;
978 params->channels[i].channel = i + 1;
979 memset(params->channels[i].pad, 0, 3);
980 } 872 }
981 873
982 for (i = num_channels; i < SCAN_MAX_NUM_OF_CHANNELS; i++) 874 mutex_lock(&wl->mutex);
983 memset(&params->channels[i], 0,
984 sizeof(struct basic_scan_channel_parameters));
985
986 if (len && ssid) {
987 params->params.ssid_len = len;
988 memcpy(params->params.ssid, ssid, len);
989 } else {
990 params->params.ssid_len = 0;
991 memset(params->params.ssid, 0, 32);
992 }
993 875
994 ret = wl1251_build_probe_req(wl, ssid, len); 876 if (wl->scanning) {
995 if (ret < 0) { 877 wl1251_debug(DEBUG_SCAN, "scan already in progress");
996 wl1251_error("PROBE request template failed"); 878 ret = -EINVAL;
997 goto out; 879 goto out;
998 } 880 }
999 881
1000 trigger = kzalloc(sizeof(*trigger), GFP_KERNEL); 882 ret = wl1251_ps_elp_wakeup(wl);
1001 if (!trigger) 883 if (ret < 0)
1002 goto out; 884 goto out;
1003 885
1004 trigger->timeout = 0; 886 skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
1005 887 req->ie, req->ie_len);
1006 ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger, 888 if (!skb) {
1007 sizeof(*trigger)); 889 ret = -ENOMEM;
1008 if (ret < 0) {
1009 wl1251_error("trigger scan to failed for hw scan");
1010 goto out; 890 goto out;
1011 } 891 }
1012 892
1013 wl1251_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params)); 893 ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, skb->data,
1014 894 skb->len);
1015 wl->scanning = true; 895 dev_kfree_skb(skb);
896 if (ret < 0)
897 goto out_sleep;
1016 898
1017 ret = wl1251_cmd_send(wl, CMD_SCAN, params, sizeof(*params)); 899 ret = wl1251_cmd_trigger_scan_to(wl, 0);
1018 if (ret < 0) 900 if (ret < 0)
1019 wl1251_error("SCAN failed"); 901 goto out_sleep;
1020 902
1021 wl1251_mem_read(wl, wl->cmd_box_addr, params, sizeof(*params)); 903 wl->scanning = true;
1022 904
1023 if (params->header.status != CMD_STATUS_SUCCESS) { 905 ret = wl1251_cmd_scan(wl, ssid, ssid_len, req->channels,
1024 wl1251_error("TEST command answer error: %d", 906 req->n_channels, WL1251_SCAN_NUM_PROBES);
1025 params->header.status); 907 if (ret < 0) {
1026 wl->scanning = false; 908 wl->scanning = false;
1027 ret = -EIO; 909 goto out_sleep;
1028 goto out;
1029 }
1030
1031out:
1032 kfree(params);
1033 return ret;
1034
1035}
1036
1037static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
1038 struct cfg80211_scan_request *req)
1039{
1040 struct wl1251 *wl = hw->priv;
1041 int ret;
1042 u8 *ssid = NULL;
1043 size_t ssid_len = 0;
1044
1045 wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan");
1046
1047 if (req->n_ssids) {
1048 ssid = req->ssids[0].ssid;
1049 ssid_len = req->ssids[0].ssid_len;
1050 } 910 }
1051 911
1052 mutex_lock(&wl->mutex); 912out_sleep:
1053
1054 ret = wl1251_ps_elp_wakeup(wl);
1055 if (ret < 0)
1056 goto out;
1057
1058 ret = wl1251_hw_scan(hw->priv, ssid, ssid_len, 1, 0, 13, 3);
1059
1060 wl1251_ps_elp_sleep(wl); 913 wl1251_ps_elp_sleep(wl);
1061 914
1062out: 915out:
@@ -1093,9 +946,8 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1093 struct ieee80211_bss_conf *bss_conf, 946 struct ieee80211_bss_conf *bss_conf,
1094 u32 changed) 947 u32 changed)
1095{ 948{
1096 enum wl1251_cmd_ps_mode mode;
1097 struct wl1251 *wl = hw->priv; 949 struct wl1251 *wl = hw->priv;
1098 struct sk_buff *beacon; 950 struct sk_buff *beacon, *skb;
1099 int ret; 951 int ret;
1100 952
1101 wl1251_debug(DEBUG_MAC80211, "mac80211 bss info changed"); 953 wl1251_debug(DEBUG_MAC80211, "mac80211 bss info changed");
@@ -1109,7 +961,17 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1109 if (changed & BSS_CHANGED_BSSID) { 961 if (changed & BSS_CHANGED_BSSID) {
1110 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN); 962 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
1111 963
1112 ret = wl1251_build_null_data(wl); 964 skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
965 if (!skb)
966 goto out_sleep;
967
968 ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA,
969 skb->data, skb->len);
970 dev_kfree_skb(skb);
971 if (ret < 0)
972 goto out_sleep;
973
974 ret = wl1251_build_qos_null_data(wl);
1113 if (ret < 0) 975 if (ret < 0)
1114 goto out; 976 goto out;
1115 977
@@ -1124,27 +986,21 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1124 if (changed & BSS_CHANGED_ASSOC) { 986 if (changed & BSS_CHANGED_ASSOC) {
1125 if (bss_conf->assoc) { 987 if (bss_conf->assoc) {
1126 wl->beacon_int = bss_conf->beacon_int; 988 wl->beacon_int = bss_conf->beacon_int;
1127 wl->dtim_period = bss_conf->dtim_period;
1128 989
1129 ret = wl1251_acx_wr_tbtt_and_dtim(wl, wl->beacon_int, 990 skb = ieee80211_pspoll_get(wl->hw, wl->vif);
1130 wl->dtim_period); 991 if (!skb)
1131 wl->aid = bss_conf->aid; 992 goto out_sleep;
1132 993
1133 ret = wl1251_build_ps_poll(wl, wl->aid); 994 ret = wl1251_cmd_template_set(wl, CMD_PS_POLL,
995 skb->data,
996 skb->len);
997 dev_kfree_skb(skb);
1134 if (ret < 0) 998 if (ret < 0)
1135 goto out_sleep; 999 goto out_sleep;
1136 1000
1137 ret = wl1251_acx_aid(wl, wl->aid); 1001 ret = wl1251_acx_aid(wl, bss_conf->aid);
1138 if (ret < 0) 1002 if (ret < 0)
1139 goto out_sleep; 1003 goto out_sleep;
1140
1141 /* If we want to go in PSM but we're not there yet */
1142 if (wl->psm_requested && !wl->psm) {
1143 mode = STATION_POWER_SAVE_MODE;
1144 ret = wl1251_ps_set_mode(wl, mode);
1145 if (ret < 0)
1146 goto out_sleep;
1147 }
1148 } else { 1004 } else {
1149 /* use defaults when not associated */ 1005 /* use defaults when not associated */
1150 wl->beacon_int = WL1251_DEFAULT_BEACON_INT; 1006 wl->beacon_int = WL1251_DEFAULT_BEACON_INT;
@@ -1176,7 +1032,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1176 ret = wl1251_acx_cts_protect(wl, CTSPROTECT_DISABLE); 1032 ret = wl1251_acx_cts_protect(wl, CTSPROTECT_DISABLE);
1177 if (ret < 0) { 1033 if (ret < 0) {
1178 wl1251_warning("Set ctsprotect failed %d", ret); 1034 wl1251_warning("Set ctsprotect failed %d", ret);
1179 goto out; 1035 goto out_sleep;
1180 } 1036 }
1181 } 1037 }
1182 1038
@@ -1187,7 +1043,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1187 1043
1188 if (ret < 0) { 1044 if (ret < 0) {
1189 dev_kfree_skb(beacon); 1045 dev_kfree_skb(beacon);
1190 goto out; 1046 goto out_sleep;
1191 } 1047 }
1192 1048
1193 ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, beacon->data, 1049 ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, beacon->data,
@@ -1196,13 +1052,13 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1196 dev_kfree_skb(beacon); 1052 dev_kfree_skb(beacon);
1197 1053
1198 if (ret < 0) 1054 if (ret < 0)
1199 goto out; 1055 goto out_sleep;
1200 1056
1201 ret = wl1251_join(wl, wl->bss_type, wl->beacon_int, 1057 ret = wl1251_join(wl, wl->bss_type, wl->beacon_int,
1202 wl->channel, wl->dtim_period); 1058 wl->channel, wl->dtim_period);
1203 1059
1204 if (ret < 0) 1060 if (ret < 0)
1205 goto out; 1061 goto out_sleep;
1206 } 1062 }
1207 1063
1208out_sleep: 1064out_sleep:
@@ -1273,6 +1129,49 @@ static struct ieee80211_channel wl1251_channels[] = {
1273 { .hw_value = 13, .center_freq = 2472}, 1129 { .hw_value = 13, .center_freq = 2472},
1274}; 1130};
1275 1131
1132static int wl1251_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
1133 const struct ieee80211_tx_queue_params *params)
1134{
1135 enum wl1251_acx_ps_scheme ps_scheme;
1136 struct wl1251 *wl = hw->priv;
1137 int ret;
1138
1139 mutex_lock(&wl->mutex);
1140
1141 wl1251_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
1142
1143 ret = wl1251_ps_elp_wakeup(wl);
1144 if (ret < 0)
1145 goto out;
1146
1147 /* mac80211 uses units of 32 usec */
1148 ret = wl1251_acx_ac_cfg(wl, wl1251_tx_get_queue(queue),
1149 params->cw_min, params->cw_max,
1150 params->aifs, params->txop * 32);
1151 if (ret < 0)
1152 goto out_sleep;
1153
1154 if (params->uapsd)
1155 ps_scheme = WL1251_ACX_PS_SCHEME_UPSD_TRIGGER;
1156 else
1157 ps_scheme = WL1251_ACX_PS_SCHEME_LEGACY;
1158
1159 ret = wl1251_acx_tid_cfg(wl, wl1251_tx_get_queue(queue),
1160 CHANNEL_TYPE_EDCF,
1161 wl1251_tx_get_queue(queue), ps_scheme,
1162 WL1251_ACX_ACK_POLICY_LEGACY);
1163 if (ret < 0)
1164 goto out_sleep;
1165
1166out_sleep:
1167 wl1251_ps_elp_sleep(wl);
1168
1169out:
1170 mutex_unlock(&wl->mutex);
1171
1172 return ret;
1173}
1174
1276/* can't be const, mac80211 writes to this */ 1175/* can't be const, mac80211 writes to this */
1277static struct ieee80211_supported_band wl1251_band_2ghz = { 1176static struct ieee80211_supported_band wl1251_band_2ghz = {
1278 .channels = wl1251_channels, 1177 .channels = wl1251_channels,
@@ -1293,6 +1192,7 @@ static const struct ieee80211_ops wl1251_ops = {
1293 .hw_scan = wl1251_op_hw_scan, 1192 .hw_scan = wl1251_op_hw_scan,
1294 .bss_info_changed = wl1251_op_bss_info_changed, 1193 .bss_info_changed = wl1251_op_bss_info_changed,
1295 .set_rts_threshold = wl1251_op_set_rts_threshold, 1194 .set_rts_threshold = wl1251_op_set_rts_threshold,
1195 .conf_tx = wl1251_op_conf_tx,
1296}; 1196};
1297 1197
1298static int wl1251_register_hw(struct wl1251 *wl) 1198static int wl1251_register_hw(struct wl1251 *wl)
@@ -1332,12 +1232,15 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
1332 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | 1232 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
1333 IEEE80211_HW_NOISE_DBM | 1233 IEEE80211_HW_NOISE_DBM |
1334 IEEE80211_HW_SUPPORTS_PS | 1234 IEEE80211_HW_SUPPORTS_PS |
1335 IEEE80211_HW_BEACON_FILTER; 1235 IEEE80211_HW_BEACON_FILTER |
1236 IEEE80211_HW_SUPPORTS_UAPSD;
1336 1237
1337 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 1238 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1338 wl->hw->wiphy->max_scan_ssids = 1; 1239 wl->hw->wiphy->max_scan_ssids = 1;
1339 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz; 1240 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz;
1340 1241
1242 wl->hw->queues = 4;
1243
1341 ret = wl1251_register_hw(wl); 1244 ret = wl1251_register_hw(wl);
1342 if (ret) 1245 if (ret)
1343 goto out; 1246 goto out;
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.c b/drivers/net/wireless/wl12xx/wl1251_ps.c
index 9931b197ff77..851dfb65e474 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1251_ps.c
@@ -26,7 +26,8 @@
26#include "wl1251_cmd.h" 26#include "wl1251_cmd.h"
27#include "wl1251_io.h" 27#include "wl1251_io.h"
28 28
29#define WL1251_WAKEUP_TIMEOUT 2000 29/* in ms */
30#define WL1251_WAKEUP_TIMEOUT 100
30 31
31void wl1251_elp_work(struct work_struct *work) 32void wl1251_elp_work(struct work_struct *work)
32{ 33{
@@ -67,7 +68,7 @@ void wl1251_ps_elp_sleep(struct wl1251 *wl)
67 68
68int wl1251_ps_elp_wakeup(struct wl1251 *wl) 69int wl1251_ps_elp_wakeup(struct wl1251 *wl)
69{ 70{
70 unsigned long timeout; 71 unsigned long timeout, start;
71 u32 elp_reg; 72 u32 elp_reg;
72 73
73 if (!wl->elp) 74 if (!wl->elp)
@@ -75,6 +76,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
75 76
76 wl1251_debug(DEBUG_PSM, "waking up chip from elp"); 77 wl1251_debug(DEBUG_PSM, "waking up chip from elp");
77 78
79 start = jiffies;
78 timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT); 80 timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT);
79 81
80 wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP); 82 wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
@@ -95,8 +97,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
95 } 97 }
96 98
97 wl1251_debug(DEBUG_PSM, "wakeup time: %u ms", 99 wl1251_debug(DEBUG_PSM, "wakeup time: %u ms",
98 jiffies_to_msecs(jiffies) - 100 jiffies_to_msecs(jiffies - start));
99 (jiffies_to_msecs(timeout) - WL1251_WAKEUP_TIMEOUT));
100 101
101 wl->elp = false; 102 wl->elp = false;
102 103
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c
index f84cc89cbffc..b56732226cc0 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.c
@@ -126,7 +126,7 @@ static void wl1251_rx_body(struct wl1251 *wl,
126 if (wl->rx_current_buffer) 126 if (wl->rx_current_buffer)
127 rx_packet_ring_addr += wl->data_path->rx_packet_ring_chunk_size; 127 rx_packet_ring_addr += wl->data_path->rx_packet_ring_chunk_size;
128 128
129 skb = dev_alloc_skb(length); 129 skb = __dev_alloc_skb(length, GFP_KERNEL);
130 if (!skb) { 130 if (!skb) {
131 wl1251_error("Couldn't allocate RX frame"); 131 wl1251_error("Couldn't allocate RX frame");
132 return; 132 return;
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.c b/drivers/net/wireless/wl12xx/wl1251_tx.c
index f85970615849..c8223185efd2 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.c
@@ -167,8 +167,7 @@ static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb,
167 tx_hdr->expiry_time = cpu_to_le32(1 << 16); 167 tx_hdr->expiry_time = cpu_to_le32(1 << 16);
168 tx_hdr->id = id; 168 tx_hdr->id = id;
169 169
170 /* FIXME: how to get the correct queue id? */ 170 tx_hdr->xmit_queue = wl1251_tx_get_queue(skb_get_queue_mapping(skb));
171 tx_hdr->xmit_queue = 0;
172 171
173 wl1251_tx_control(tx_hdr, control, fc); 172 wl1251_tx_control(tx_hdr, control, fc);
174 wl1251_tx_frag_block_num(tx_hdr); 173 wl1251_tx_frag_block_num(tx_hdr);
@@ -220,6 +219,7 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
220 /* align the buffer on a 4-byte boundary */ 219 /* align the buffer on a 4-byte boundary */
221 skb_reserve(skb, offset); 220 skb_reserve(skb, offset);
222 memmove(skb->data, src, skb->len); 221 memmove(skb->data, src, skb->len);
222 tx_hdr = (struct tx_double_buffer_desc *) skb->data;
223 } else { 223 } else {
224 wl1251_info("No handler, fixme!"); 224 wl1251_info("No handler, fixme!");
225 return -EINVAL; 225 return -EINVAL;
@@ -237,8 +237,9 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
237 237
238 wl1251_mem_write(wl, addr, skb->data, len); 238 wl1251_mem_write(wl, addr, skb->data, len);
239 239
240 wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x", 240 wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x "
241 tx_hdr->id, skb, tx_hdr->length, tx_hdr->rate); 241 "queue %d", tx_hdr->id, skb, tx_hdr->length,
242 tx_hdr->rate, tx_hdr->xmit_queue);
242 243
243 return 0; 244 return 0;
244} 245}
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.h b/drivers/net/wireless/wl12xx/wl1251_tx.h
index 7c1c1665c810..55856c6bb97a 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.h
@@ -26,6 +26,7 @@
26#define __WL1251_TX_H__ 26#define __WL1251_TX_H__
27 27
28#include <linux/bitops.h> 28#include <linux/bitops.h>
29#include "wl1251_acx.h"
29 30
30/* 31/*
31 * 32 *
@@ -209,6 +210,22 @@ struct tx_result {
209 u8 done_2; 210 u8 done_2;
210} __attribute__ ((packed)); 211} __attribute__ ((packed));
211 212
213static inline int wl1251_tx_get_queue(int queue)
214{
215 switch (queue) {
216 case 0:
217 return QOS_AC_VO;
218 case 1:
219 return QOS_AC_VI;
220 case 2:
221 return QOS_AC_BE;
222 case 3:
223 return QOS_AC_BK;
224 default:
225 return QOS_AC_BE;
226 }
227}
228
212void wl1251_tx_work(struct work_struct *work); 229void wl1251_tx_work(struct work_struct *work);
213void wl1251_tx_complete(struct wl1251 *wl); 230void wl1251_tx_complete(struct wl1251 *wl);
214void wl1251_tx_flush(struct wl1251 *wl); 231void wl1251_tx_flush(struct wl1251 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index 94359b1a861f..97ea5096bc8c 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -43,7 +43,7 @@ enum {
43 DEBUG_SPI = BIT(1), 43 DEBUG_SPI = BIT(1),
44 DEBUG_BOOT = BIT(2), 44 DEBUG_BOOT = BIT(2),
45 DEBUG_MAILBOX = BIT(3), 45 DEBUG_MAILBOX = BIT(3),
46 DEBUG_NETLINK = BIT(4), 46 DEBUG_TESTMODE = BIT(4),
47 DEBUG_EVENT = BIT(5), 47 DEBUG_EVENT = BIT(5),
48 DEBUG_TX = BIT(6), 48 DEBUG_TX = BIT(6),
49 DEBUG_RX = BIT(7), 49 DEBUG_RX = BIT(7),
@@ -107,11 +107,36 @@ enum {
107 CFG_RX_CTL_EN | CFG_RX_BCN_EN | \ 107 CFG_RX_CTL_EN | CFG_RX_BCN_EN | \
108 CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN) 108 CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
109 109
110#define WL1271_DEFAULT_BASIC_RATE_SET (CONF_TX_RATE_MASK_ALL)
111
112#define WL1271_FW_NAME "wl1271-fw.bin" 110#define WL1271_FW_NAME "wl1271-fw.bin"
113#define WL1271_NVS_NAME "wl1271-nvs.bin" 111#define WL1271_NVS_NAME "wl1271-nvs.bin"
114 112
113/* NVS data structure */
114#define WL1271_NVS_SECTION_SIZE 468
115
116#define WL1271_NVS_GENERAL_PARAMS_SIZE 57
117#define WL1271_NVS_GENERAL_PARAMS_SIZE_PADDED \
118 (WL1271_NVS_GENERAL_PARAMS_SIZE + 1)
119#define WL1271_NVS_STAT_RADIO_PARAMS_SIZE 17
120#define WL1271_NVS_STAT_RADIO_PARAMS_SIZE_PADDED \
121 (WL1271_NVS_STAT_RADIO_PARAMS_SIZE + 1)
122#define WL1271_NVS_DYN_RADIO_PARAMS_SIZE 65
123#define WL1271_NVS_DYN_RADIO_PARAMS_SIZE_PADDED \
124 (WL1271_NVS_DYN_RADIO_PARAMS_SIZE + 1)
125#define WL1271_NVS_FEM_COUNT 2
126#define WL1271_NVS_INI_SPARE_SIZE 124
127
128struct wl1271_nvs_file {
129 /* NVS section */
130 u8 nvs[WL1271_NVS_SECTION_SIZE];
131
132 /* INI section */
133 u8 general_params[WL1271_NVS_GENERAL_PARAMS_SIZE_PADDED];
134 u8 stat_radio_params[WL1271_NVS_STAT_RADIO_PARAMS_SIZE_PADDED];
135 u8 dyn_radio_params[WL1271_NVS_FEM_COUNT]
136 [WL1271_NVS_DYN_RADIO_PARAMS_SIZE_PADDED];
137 u8 ini_spare[WL1271_NVS_INI_SPARE_SIZE];
138} __attribute__ ((packed));
139
115/* 140/*
116 * Enable/disable 802.11a support for WL1273 141 * Enable/disable 802.11a support for WL1273
117 */ 142 */
@@ -276,6 +301,7 @@ struct wl1271_debugfs {
276 301
277 struct dentry *retry_count; 302 struct dentry *retry_count;
278 struct dentry *excessive_retries; 303 struct dentry *excessive_retries;
304 struct dentry *gpio_power;
279}; 305};
280 306
281#define NUM_TX_QUEUES 4 307#define NUM_TX_QUEUES 4
@@ -322,6 +348,17 @@ struct wl1271 {
322 enum wl1271_state state; 348 enum wl1271_state state;
323 struct mutex mutex; 349 struct mutex mutex;
324 350
351#define WL1271_FLAG_STA_RATES_CHANGED (0)
352#define WL1271_FLAG_STA_ASSOCIATED (1)
353#define WL1271_FLAG_JOINED (2)
354#define WL1271_FLAG_GPIO_POWER (3)
355#define WL1271_FLAG_TX_QUEUE_STOPPED (4)
356#define WL1271_FLAG_SCANNING (5)
357#define WL1271_FLAG_IN_ELP (6)
358#define WL1271_FLAG_PSM (7)
359#define WL1271_FLAG_PSM_REQUESTED (8)
360 unsigned long flags;
361
325 struct wl1271_partition_set part; 362 struct wl1271_partition_set part;
326 363
327 struct wl1271_chip chip; 364 struct wl1271_chip chip;
@@ -331,8 +368,7 @@ struct wl1271 {
331 368
332 u8 *fw; 369 u8 *fw;
333 size_t fw_len; 370 size_t fw_len;
334 u8 *nvs; 371 struct wl1271_nvs_file *nvs;
335 size_t nvs_len;
336 372
337 u8 bssid[ETH_ALEN]; 373 u8 bssid[ETH_ALEN];
338 u8 mac_addr[ETH_ALEN]; 374 u8 mac_addr[ETH_ALEN];
@@ -359,7 +395,6 @@ struct wl1271 {
359 395
360 /* Frames scheduled for transmission, not handled yet */ 396 /* Frames scheduled for transmission, not handled yet */
361 struct sk_buff_head tx_queue; 397 struct sk_buff_head tx_queue;
362 bool tx_queue_stopped;
363 398
364 struct work_struct tx_work; 399 struct work_struct tx_work;
365 400
@@ -387,14 +422,15 @@ struct wl1271 {
387 u32 mbox_ptr[2]; 422 u32 mbox_ptr[2];
388 423
389 /* Are we currently scanning */ 424 /* Are we currently scanning */
390 bool scanning;
391 struct wl1271_scan scan; 425 struct wl1271_scan scan;
392 426
393 /* Our association ID */ 427 /* Our association ID */
394 u16 aid; 428 u16 aid;
395 429
396 /* currently configured rate set */ 430 /* currently configured rate set */
431 u32 sta_rate_set;
397 u32 basic_rate_set; 432 u32 basic_rate_set;
433 u32 rate_set;
398 434
399 /* The current band */ 435 /* The current band */
400 enum ieee80211_band band; 436 enum ieee80211_band band;
@@ -405,18 +441,9 @@ struct wl1271 {
405 unsigned int rx_config; 441 unsigned int rx_config;
406 unsigned int rx_filter; 442 unsigned int rx_filter;
407 443
408 /* is firmware in elp mode */
409 bool elp;
410
411 struct completion *elp_compl; 444 struct completion *elp_compl;
412 struct delayed_work elp_work; 445 struct delayed_work elp_work;
413 446
414 /* we can be in psm, but not in elp, we have to differentiate */
415 bool psm;
416
417 /* PSM mode requested */
418 bool psm_requested;
419
420 /* retry counter for PSM entries */ 447 /* retry counter for PSM entries */
421 u8 psm_entry_retry; 448 u8 psm_entry_retry;
422 449
@@ -435,9 +462,6 @@ struct wl1271 {
435 462
436 struct ieee80211_vif *vif; 463 struct ieee80211_vif *vif;
437 464
438 /* Used for a workaround to send disconnect before rejoining */
439 bool joined;
440
441 /* Current chipset configuration */ 465 /* Current chipset configuration */
442 struct conf_drv_settings conf; 466 struct conf_drv_settings conf;
443 467
@@ -455,11 +479,14 @@ int wl1271_plt_stop(struct wl1271 *wl);
455 479
456#define WL1271_TX_QUEUE_MAX_LENGTH 20 480#define WL1271_TX_QUEUE_MAX_LENGTH 20
457 481
458/* WL1271 needs a 200ms sleep after power on */ 482/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
483 on in case is has been shut down shortly before */
484#define WL1271_PRE_POWER_ON_SLEEP 20 /* in miliseconds */
459#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */ 485#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */
460 486
461static inline bool wl1271_11a_enabled(void) 487static inline bool wl1271_11a_enabled(void)
462{ 488{
489 /* FIXME: this could be determined based on the NVS-INI file */
463#ifdef WL1271_80211A_ENABLED 490#ifdef WL1271_80211A_ENABLED
464 return true; 491 return true;
465#else 492#else
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index 5cc89bbdac7a..60f10dce4800 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -390,6 +390,35 @@ out:
390 return ret; 390 return ret;
391} 391}
392 392
393int wl1271_acx_dco_itrim_params(struct wl1271 *wl)
394{
395 struct acx_dco_itrim_params *dco;
396 struct conf_itrim_settings *c = &wl->conf.itrim;
397 int ret;
398
399 wl1271_debug(DEBUG_ACX, "acx dco itrim parameters");
400
401 dco = kzalloc(sizeof(*dco), GFP_KERNEL);
402 if (!dco) {
403 ret = -ENOMEM;
404 goto out;
405 }
406
407 dco->enable = c->enable;
408 dco->timeout = cpu_to_le32(c->timeout);
409
410 ret = wl1271_cmd_configure(wl, ACX_SET_DCO_ITRIM_PARAMS,
411 dco, sizeof(*dco));
412 if (ret < 0) {
413 wl1271_warning("failed to set dco itrim parameters: %d", ret);
414 goto out;
415 }
416
417out:
418 kfree(dco);
419 return ret;
420}
421
393int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter) 422int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
394{ 423{
395 struct acx_beacon_filter_option *beacon_filter = NULL; 424 struct acx_beacon_filter_option *beacon_filter = NULL;
@@ -758,10 +787,11 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
758 return 0; 787 return 0;
759} 788}
760 789
761int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates) 790int wl1271_acx_rate_policies(struct wl1271 *wl)
762{ 791{
763 struct acx_rate_policy *acx; 792 struct acx_rate_policy *acx;
764 struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf; 793 struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf;
794 int idx = 0;
765 int ret = 0; 795 int ret = 0;
766 796
767 wl1271_debug(DEBUG_ACX, "acx rate policies"); 797 wl1271_debug(DEBUG_ACX, "acx rate policies");
@@ -773,12 +803,21 @@ int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates)
773 goto out; 803 goto out;
774 } 804 }
775 805
776 /* configure one default (one-size-fits-all) rate class */ 806 /* configure one basic rate class */
777 acx->rate_class_cnt = cpu_to_le32(1); 807 idx = ACX_TX_BASIC_RATE;
778 acx->rate_class[0].enabled_rates = cpu_to_le32(enabled_rates); 808 acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->basic_rate_set);
779 acx->rate_class[0].short_retry_limit = c->short_retry_limit; 809 acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
780 acx->rate_class[0].long_retry_limit = c->long_retry_limit; 810 acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
781 acx->rate_class[0].aflags = c->aflags; 811 acx->rate_class[idx].aflags = c->aflags;
812
813 /* configure one AP supported rate class */
814 idx = ACX_TX_AP_FULL_RATE;
815 acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->rate_set);
816 acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
817 acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
818 acx->rate_class[idx].aflags = c->aflags;
819
820 acx->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT);
782 821
783 ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); 822 ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
784 if (ret < 0) { 823 if (ret < 0) {
@@ -791,12 +830,14 @@ out:
791 return ret; 830 return ret;
792} 831}
793 832
794int wl1271_acx_ac_cfg(struct wl1271 *wl) 833int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
834 u8 aifsn, u16 txop)
795{ 835{
796 struct acx_ac_cfg *acx; 836 struct acx_ac_cfg *acx;
797 int i, ret = 0; 837 int ret = 0;
798 838
799 wl1271_debug(DEBUG_ACX, "acx access category config"); 839 wl1271_debug(DEBUG_ACX, "acx ac cfg %d cw_ming %d cw_max %d "
840 "aifs %d txop %d", ac, cw_min, cw_max, aifsn, txop);
800 841
801 acx = kzalloc(sizeof(*acx), GFP_KERNEL); 842 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
802 843
@@ -805,21 +846,16 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl)
805 goto out; 846 goto out;
806 } 847 }
807 848
808 for (i = 0; i < wl->conf.tx.ac_conf_count; i++) { 849 acx->ac = ac;
809 struct conf_tx_ac_category *c = &(wl->conf.tx.ac_conf[i]); 850 acx->cw_min = cw_min;
810 acx->ac = c->ac; 851 acx->cw_max = cpu_to_le16(cw_max);
811 acx->cw_min = c->cw_min; 852 acx->aifsn = aifsn;
812 acx->cw_max = cpu_to_le16(c->cw_max); 853 acx->tx_op_limit = cpu_to_le16(txop);
813 acx->aifsn = c->aifsn;
814 acx->reserved = 0;
815 acx->tx_op_limit = cpu_to_le16(c->tx_op_limit);
816 854
817 ret = wl1271_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx)); 855 ret = wl1271_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx));
818 if (ret < 0) { 856 if (ret < 0) {
819 wl1271_warning("Setting of access category " 857 wl1271_warning("acx ac cfg failed: %d", ret);
820 "config: %d", ret); 858 goto out;
821 goto out;
822 }
823 } 859 }
824 860
825out: 861out:
@@ -827,10 +863,12 @@ out:
827 return ret; 863 return ret;
828} 864}
829 865
830int wl1271_acx_tid_cfg(struct wl1271 *wl) 866int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
867 u8 tsid, u8 ps_scheme, u8 ack_policy,
868 u32 apsd_conf0, u32 apsd_conf1)
831{ 869{
832 struct acx_tid_config *acx; 870 struct acx_tid_config *acx;
833 int i, ret = 0; 871 int ret = 0;
834 872
835 wl1271_debug(DEBUG_ACX, "acx tid config"); 873 wl1271_debug(DEBUG_ACX, "acx tid config");
836 874
@@ -841,21 +879,18 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl)
841 goto out; 879 goto out;
842 } 880 }
843 881
844 for (i = 0; i < wl->conf.tx.tid_conf_count; i++) { 882 acx->queue_id = queue_id;
845 struct conf_tx_tid *c = &(wl->conf.tx.tid_conf[i]); 883 acx->channel_type = channel_type;
846 acx->queue_id = c->queue_id; 884 acx->tsid = tsid;
847 acx->channel_type = c->channel_type; 885 acx->ps_scheme = ps_scheme;
848 acx->tsid = c->tsid; 886 acx->ack_policy = ack_policy;
849 acx->ps_scheme = c->ps_scheme; 887 acx->apsd_conf[0] = cpu_to_le32(apsd_conf0);
850 acx->ack_policy = c->ack_policy; 888 acx->apsd_conf[1] = cpu_to_le32(apsd_conf1);
851 acx->apsd_conf[0] = cpu_to_le32(c->apsd_conf[0]);
852 acx->apsd_conf[1] = cpu_to_le32(c->apsd_conf[1]);
853 889
854 ret = wl1271_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx)); 890 ret = wl1271_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx));
855 if (ret < 0) { 891 if (ret < 0) {
856 wl1271_warning("Setting of tid config failed: %d", ret); 892 wl1271_warning("Setting of tid config failed: %d", ret);
857 goto out; 893 goto out;
858 }
859 } 894 }
860 895
861out: 896out:
@@ -1012,59 +1047,6 @@ out:
1012 return ret; 1047 return ret;
1013} 1048}
1014 1049
1015int wl1271_acx_smart_reflex(struct wl1271 *wl)
1016{
1017 struct acx_smart_reflex_state *sr_state = NULL;
1018 struct acx_smart_reflex_config_params *sr_param = NULL;
1019 int i, ret;
1020
1021 wl1271_debug(DEBUG_ACX, "acx smart reflex");
1022
1023 sr_param = kzalloc(sizeof(*sr_param), GFP_KERNEL);
1024 if (!sr_param) {
1025 ret = -ENOMEM;
1026 goto out;
1027 }
1028
1029 for (i = 0; i < CONF_SR_ERR_TBL_COUNT; i++) {
1030 struct conf_mart_reflex_err_table *e =
1031 &(wl->conf.init.sr_err_tbl[i]);
1032
1033 sr_param->error_table[i].len = e->len;
1034 sr_param->error_table[i].upper_limit = e->upper_limit;
1035 memcpy(sr_param->error_table[i].values, e->values, e->len);
1036 }
1037
1038 ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_PARAMS,
1039 sr_param, sizeof(*sr_param));
1040 if (ret < 0) {
1041 wl1271_warning("failed to set smart reflex params: %d", ret);
1042 goto out;
1043 }
1044
1045 sr_state = kzalloc(sizeof(*sr_state), GFP_KERNEL);
1046 if (!sr_state) {
1047 ret = -ENOMEM;
1048 goto out;
1049 }
1050
1051 /* enable smart reflex */
1052 sr_state->enable = wl->conf.init.sr_enable;
1053
1054 ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_STATE,
1055 sr_state, sizeof(*sr_state));
1056 if (ret < 0) {
1057 wl1271_warning("failed to set smart reflex params: %d", ret);
1058 goto out;
1059 }
1060
1061out:
1062 kfree(sr_state);
1063 kfree(sr_param);
1064 return ret;
1065
1066}
1067
1068int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable) 1050int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
1069{ 1051{
1070 struct wl1271_acx_bet_enable *acx = NULL; 1052 struct wl1271_acx_bet_enable *acx = NULL;
@@ -1132,3 +1114,31 @@ out:
1132 kfree(acx); 1114 kfree(acx);
1133 return ret; 1115 return ret;
1134} 1116}
1117
1118int wl1271_acx_pm_config(struct wl1271 *wl)
1119{
1120 struct wl1271_acx_pm_config *acx = NULL;
1121 struct conf_pm_config_settings *c = &wl->conf.pm_config;
1122 int ret = 0;
1123
1124 wl1271_debug(DEBUG_ACX, "acx pm config");
1125
1126 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1127 if (!acx) {
1128 ret = -ENOMEM;
1129 goto out;
1130 }
1131
1132 acx->host_clk_settling_time = cpu_to_le32(c->host_clk_settling_time);
1133 acx->host_fast_wakeup_support = c->host_fast_wakeup_support;
1134
1135 ret = wl1271_cmd_configure(wl, ACX_PM_CONFIG, acx, sizeof(*acx));
1136 if (ret < 0) {
1137 wl1271_warning("acx pm config failed: %d", ret);
1138 goto out;
1139 }
1140
1141out:
1142 kfree(acx);
1143 return ret;
1144}
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index 2ce0a8128542..aeccc98581eb 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -2,7 +2,7 @@
2 * This file is part of wl1271 2 * This file is part of wl1271
3 * 3 *
4 * Copyright (C) 1998-2009 Texas Instruments. All rights reserved. 4 * Copyright (C) 1998-2009 Texas Instruments. All rights reserved.
5 * Copyright (C) 2008-2009 Nokia Corporation 5 * Copyright (C) 2008-2010 Nokia Corporation
6 * 6 *
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com> 7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
8 * 8 *
@@ -348,7 +348,7 @@ struct acx_beacon_filter_option {
348 * ACXBeaconFilterEntry (not 221) 348 * ACXBeaconFilterEntry (not 221)
349 * Byte Offset Size (Bytes) Definition 349 * Byte Offset Size (Bytes) Definition
350 * =========== ============ ========== 350 * =========== ============ ==========
351 * 0 1 IE identifier 351 * 0 1 IE identifier
352 * 1 1 Treatment bit mask 352 * 1 1 Treatment bit mask
353 * 353 *
354 * ACXBeaconFilterEntry (221) 354 * ACXBeaconFilterEntry (221)
@@ -381,8 +381,8 @@ struct acx_beacon_filter_ie_table {
381 struct acx_header header; 381 struct acx_header header;
382 382
383 u8 num_ie; 383 u8 num_ie;
384 u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
385 u8 pad[3]; 384 u8 pad[3];
385 u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
386} __attribute__ ((packed)); 386} __attribute__ ((packed));
387 387
388struct acx_conn_monit_params { 388struct acx_conn_monit_params {
@@ -415,23 +415,12 @@ struct acx_bt_wlan_coex {
415 u8 pad[3]; 415 u8 pad[3];
416} __attribute__ ((packed)); 416} __attribute__ ((packed));
417 417
418struct acx_smart_reflex_state { 418struct acx_dco_itrim_params {
419 struct acx_header header; 419 struct acx_header header;
420 420
421 u8 enable; 421 u8 enable;
422 u8 padding[3]; 422 u8 padding[3];
423} __attribute__ ((packed)); 423 __le32 timeout;
424
425struct smart_reflex_err_table {
426 u8 len;
427 s8 upper_limit;
428 s8 values[14];
429} __attribute__ ((packed));
430
431struct acx_smart_reflex_config_params {
432 struct acx_header header;
433
434 struct smart_reflex_err_table error_table[3];
435} __attribute__ ((packed)); 424} __attribute__ ((packed));
436 425
437#define PTA_ANTENNA_TYPE_DEF (0) 426#define PTA_ANTENNA_TYPE_DEF (0)
@@ -837,6 +826,9 @@ struct acx_rate_class {
837 u8 reserved; 826 u8 reserved;
838}; 827};
839 828
829#define ACX_TX_BASIC_RATE 0
830#define ACX_TX_AP_FULL_RATE 1
831#define ACX_TX_RATE_POLICY_CNT 2
840struct acx_rate_policy { 832struct acx_rate_policy {
841 struct acx_header header; 833 struct acx_header header;
842 834
@@ -877,8 +869,8 @@ struct acx_tx_config_options {
877 __le16 tx_compl_threshold; /* number of packets */ 869 __le16 tx_compl_threshold; /* number of packets */
878} __attribute__ ((packed)); 870} __attribute__ ((packed));
879 871
880#define ACX_RX_MEM_BLOCKS 64 872#define ACX_RX_MEM_BLOCKS 70
881#define ACX_TX_MIN_MEM_BLOCKS 64 873#define ACX_TX_MIN_MEM_BLOCKS 40
882#define ACX_TX_DESCRIPTORS 32 874#define ACX_TX_DESCRIPTORS 32
883#define ACX_NUM_SSID_PROFILES 1 875#define ACX_NUM_SSID_PROFILES 1
884 876
@@ -969,6 +961,13 @@ struct wl1271_acx_arp_filter {
969 used. */ 961 used. */
970} __attribute__((packed)); 962} __attribute__((packed));
971 963
964struct wl1271_acx_pm_config {
965 struct acx_header header;
966
967 __le32 host_clk_settling_time;
968 u8 host_fast_wakeup_support;
969 u8 padding[3];
970} __attribute__ ((packed));
972 971
973enum { 972enum {
974 ACX_WAKE_UP_CONDITIONS = 0x0002, 973 ACX_WAKE_UP_CONDITIONS = 0x0002,
@@ -1027,13 +1026,13 @@ enum {
1027 ACX_HT_BSS_OPERATION = 0x0058, 1026 ACX_HT_BSS_OPERATION = 0x0058,
1028 ACX_COEX_ACTIVITY = 0x0059, 1027 ACX_COEX_ACTIVITY = 0x0059,
1029 ACX_SET_SMART_REFLEX_DEBUG = 0x005A, 1028 ACX_SET_SMART_REFLEX_DEBUG = 0x005A,
1030 ACX_SET_SMART_REFLEX_STATE = 0x005B, 1029 ACX_SET_DCO_ITRIM_PARAMS = 0x0061,
1031 ACX_SET_SMART_REFLEX_PARAMS = 0x005F,
1032 DOT11_RX_MSDU_LIFE_TIME = 0x1004, 1030 DOT11_RX_MSDU_LIFE_TIME = 0x1004,
1033 DOT11_CUR_TX_PWR = 0x100D, 1031 DOT11_CUR_TX_PWR = 0x100D,
1034 DOT11_RX_DOT11_MODE = 0x1012, 1032 DOT11_RX_DOT11_MODE = 0x1012,
1035 DOT11_RTS_THRESHOLD = 0x1013, 1033 DOT11_RTS_THRESHOLD = 0x1013,
1036 DOT11_GROUP_ADDRESS_TBL = 0x1014, 1034 DOT11_GROUP_ADDRESS_TBL = 0x1014,
1035 ACX_PM_CONFIG = 0x1016,
1037 1036
1038 MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL, 1037 MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL,
1039 1038
@@ -1056,6 +1055,7 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
1056 void *mc_list, u32 mc_list_len); 1055 void *mc_list, u32 mc_list_len);
1057int wl1271_acx_service_period_timeout(struct wl1271 *wl); 1056int wl1271_acx_service_period_timeout(struct wl1271 *wl);
1058int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold); 1057int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold);
1058int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
1059int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter); 1059int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
1060int wl1271_acx_beacon_filter_table(struct wl1271 *wl); 1060int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
1061int wl1271_acx_conn_monit_params(struct wl1271 *wl); 1061int wl1271_acx_conn_monit_params(struct wl1271 *wl);
@@ -1069,9 +1069,12 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
1069int wl1271_acx_cts_protect(struct wl1271 *wl, 1069int wl1271_acx_cts_protect(struct wl1271 *wl,
1070 enum acx_ctsprotect_type ctsprotect); 1070 enum acx_ctsprotect_type ctsprotect);
1071int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats); 1071int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
1072int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates); 1072int wl1271_acx_rate_policies(struct wl1271 *wl);
1073int wl1271_acx_ac_cfg(struct wl1271 *wl); 1073int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
1074int wl1271_acx_tid_cfg(struct wl1271 *wl); 1074 u8 aifsn, u16 txop);
1075int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
1076 u8 tsid, u8 ps_scheme, u8 ack_policy,
1077 u32 apsd_conf0, u32 apsd_conf1);
1075int wl1271_acx_frag_threshold(struct wl1271 *wl); 1078int wl1271_acx_frag_threshold(struct wl1271 *wl);
1076int wl1271_acx_tx_config_options(struct wl1271 *wl); 1079int wl1271_acx_tx_config_options(struct wl1271 *wl);
1077int wl1271_acx_mem_cfg(struct wl1271 *wl); 1080int wl1271_acx_mem_cfg(struct wl1271 *wl);
@@ -1081,5 +1084,6 @@ int wl1271_acx_smart_reflex(struct wl1271 *wl);
1081int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable); 1084int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
1082int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address, 1085int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
1083 u8 version); 1086 u8 version);
1087int wl1271_acx_pm_config(struct wl1271 *wl);
1084 1088
1085#endif /* __WL1271_ACX_H__ */ 1089#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c
index b7c96454cca3..2be76ee42bb9 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.c
@@ -27,6 +27,7 @@
27#include "wl1271_reg.h" 27#include "wl1271_reg.h"
28#include "wl1271_boot.h" 28#include "wl1271_boot.h"
29#include "wl1271_spi.h" 29#include "wl1271_spi.h"
30#include "wl1271_io.h"
30#include "wl1271_event.h" 31#include "wl1271_event.h"
31 32
32static struct wl1271_partition_set part_table[PART_TABLE_LEN] = { 33static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
@@ -93,19 +94,19 @@ static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
93 u32 cpu_ctrl; 94 u32 cpu_ctrl;
94 95
95 /* 10.5.0 run the firmware (I) */ 96 /* 10.5.0 run the firmware (I) */
96 cpu_ctrl = wl1271_spi_read32(wl, ACX_REG_ECPU_CONTROL); 97 cpu_ctrl = wl1271_read32(wl, ACX_REG_ECPU_CONTROL);
97 98
98 /* 10.5.1 run the firmware (II) */ 99 /* 10.5.1 run the firmware (II) */
99 cpu_ctrl |= flag; 100 cpu_ctrl |= flag;
100 wl1271_spi_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl); 101 wl1271_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
101} 102}
102 103
103static void wl1271_boot_fw_version(struct wl1271 *wl) 104static void wl1271_boot_fw_version(struct wl1271 *wl)
104{ 105{
105 struct wl1271_static_data static_data; 106 struct wl1271_static_data static_data;
106 107
107 wl1271_spi_read(wl, wl->cmd_box_addr, 108 wl1271_read(wl, wl->cmd_box_addr, &static_data, sizeof(static_data),
108 &static_data, sizeof(static_data), false); 109 false);
109 110
110 strncpy(wl->chip.fw_ver, static_data.fw_version, 111 strncpy(wl->chip.fw_ver, static_data.fw_version,
111 sizeof(wl->chip.fw_ver)); 112 sizeof(wl->chip.fw_ver));
@@ -164,7 +165,7 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
164 memcpy(chunk, p, CHUNK_SIZE); 165 memcpy(chunk, p, CHUNK_SIZE);
165 wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x", 166 wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
166 p, addr); 167 p, addr);
167 wl1271_spi_write(wl, addr, chunk, CHUNK_SIZE, false); 168 wl1271_write(wl, addr, chunk, CHUNK_SIZE, false);
168 169
169 chunk_num++; 170 chunk_num++;
170 } 171 }
@@ -175,7 +176,7 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
175 memcpy(chunk, p, fw_data_len % CHUNK_SIZE); 176 memcpy(chunk, p, fw_data_len % CHUNK_SIZE);
176 wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x", 177 wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x",
177 fw_data_len % CHUNK_SIZE, p, addr); 178 fw_data_len % CHUNK_SIZE, p, addr);
178 wl1271_spi_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false); 179 wl1271_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
179 180
180 kfree(chunk); 181 kfree(chunk);
181 return 0; 182 return 0;
@@ -219,23 +220,14 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
219 size_t nvs_len, burst_len; 220 size_t nvs_len, burst_len;
220 int i; 221 int i;
221 u32 dest_addr, val; 222 u32 dest_addr, val;
222 u8 *nvs_ptr, *nvs, *nvs_aligned; 223 u8 *nvs_ptr, *nvs_aligned;
223 224
224 nvs = wl->nvs; 225 if (wl->nvs == NULL)
225 if (nvs == NULL)
226 return -ENODEV; 226 return -ENODEV;
227 227
228 nvs_ptr = nvs; 228 /* only the first part of the NVS needs to be uploaded */
229 229 nvs_len = sizeof(wl->nvs->nvs);
230 nvs_len = wl->nvs_len; 230 nvs_ptr = (u8 *)wl->nvs->nvs;
231
232 /* Update the device MAC address into the nvs */
233 nvs[11] = wl->mac_addr[0];
234 nvs[10] = wl->mac_addr[1];
235 nvs[6] = wl->mac_addr[2];
236 nvs[5] = wl->mac_addr[3];
237 nvs[4] = wl->mac_addr[4];
238 nvs[3] = wl->mac_addr[5];
239 231
240 /* 232 /*
241 * Layout before the actual NVS tables: 233 * Layout before the actual NVS tables:
@@ -265,7 +257,7 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
265 wl1271_debug(DEBUG_BOOT, 257 wl1271_debug(DEBUG_BOOT,
266 "nvs burst write 0x%x: 0x%x", 258 "nvs burst write 0x%x: 0x%x",
267 dest_addr, val); 259 dest_addr, val);
268 wl1271_spi_write32(wl, dest_addr, val); 260 wl1271_write32(wl, dest_addr, val);
269 261
270 nvs_ptr += 4; 262 nvs_ptr += 4;
271 dest_addr += 4; 263 dest_addr += 4;
@@ -277,7 +269,7 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
277 * is 7 bytes further. 269 * is 7 bytes further.
278 */ 270 */
279 nvs_ptr += 7; 271 nvs_ptr += 7;
280 nvs_len -= nvs_ptr - nvs; 272 nvs_len -= nvs_ptr - (u8 *)wl->nvs->nvs;
281 nvs_len = ALIGN(nvs_len, 4); 273 nvs_len = ALIGN(nvs_len, 4);
282 274
283 /* FIXME: The driver sets the partition here, but this is not needed, 275 /* FIXME: The driver sets the partition here, but this is not needed,
@@ -286,15 +278,20 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
286 wl1271_set_partition(wl, &part_table[PART_WORK]); 278 wl1271_set_partition(wl, &part_table[PART_WORK]);
287 279
288 /* Copy the NVS tables to a new block to ensure alignment */ 280 /* Copy the NVS tables to a new block to ensure alignment */
289 nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL); 281 /* FIXME: We jump 3 more bytes before uploading the NVS. It seems
290 if (!nvs_aligned) 282 that our NVS files have three extra zeros here. I'm not sure whether
291 return -ENOMEM; 283 the problem is in our NVS generation or we should really jumpt these
284 3 bytes here */
285 nvs_ptr += 3;
286
287 nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL); if
288 (!nvs_aligned) return -ENOMEM;
292 289
293 /* And finally we upload the NVS tables */ 290 /* And finally we upload the NVS tables */
294 /* FIXME: In wl1271, we upload everything at once. 291 /* FIXME: In wl1271, we upload everything at once.
295 No endianness handling needed here?! The ref driver doesn't do 292 No endianness handling needed here?! The ref driver doesn't do
296 anything about it at this point */ 293 anything about it at this point */
297 wl1271_spi_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len, false); 294 wl1271_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len, false);
298 295
299 kfree(nvs_aligned); 296 kfree(nvs_aligned);
300 return 0; 297 return 0;
@@ -303,9 +300,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
303static void wl1271_boot_enable_interrupts(struct wl1271 *wl) 300static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
304{ 301{
305 enable_irq(wl->irq); 302 enable_irq(wl->irq);
306 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK, 303 wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
307 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK)); 304 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
308 wl1271_spi_write32(wl, HI_CFG, HI_CFG_DEF_VAL); 305 wl1271_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
309} 306}
310 307
311static int wl1271_boot_soft_reset(struct wl1271 *wl) 308static int wl1271_boot_soft_reset(struct wl1271 *wl)
@@ -314,13 +311,12 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
314 u32 boot_data; 311 u32 boot_data;
315 312
316 /* perform soft reset */ 313 /* perform soft reset */
317 wl1271_spi_write32(wl, ACX_REG_SLV_SOFT_RESET, 314 wl1271_write32(wl, ACX_REG_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT);
318 ACX_SLV_SOFT_RESET_BIT);
319 315
320 /* SOFT_RESET is self clearing */ 316 /* SOFT_RESET is self clearing */
321 timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME); 317 timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
322 while (1) { 318 while (1) {
323 boot_data = wl1271_spi_read32(wl, ACX_REG_SLV_SOFT_RESET); 319 boot_data = wl1271_read32(wl, ACX_REG_SLV_SOFT_RESET);
324 wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data); 320 wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
325 if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0) 321 if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
326 break; 322 break;
@@ -336,10 +332,10 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
336 } 332 }
337 333
338 /* disable Rx/Tx */ 334 /* disable Rx/Tx */
339 wl1271_spi_write32(wl, ENABLE, 0x0); 335 wl1271_write32(wl, ENABLE, 0x0);
340 336
341 /* disable auto calibration on start*/ 337 /* disable auto calibration on start*/
342 wl1271_spi_write32(wl, SPARE_A2, 0xffff); 338 wl1271_write32(wl, SPARE_A2, 0xffff);
343 339
344 return 0; 340 return 0;
345} 341}
@@ -351,7 +347,7 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
351 347
352 wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT); 348 wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
353 349
354 chip_id = wl1271_spi_read32(wl, CHIP_ID_B); 350 chip_id = wl1271_read32(wl, CHIP_ID_B);
355 351
356 wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id); 352 wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id);
357 353
@@ -364,8 +360,7 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
364 loop = 0; 360 loop = 0;
365 while (loop++ < INIT_LOOP) { 361 while (loop++ < INIT_LOOP) {
366 udelay(INIT_LOOP_DELAY); 362 udelay(INIT_LOOP_DELAY);
367 interrupt = wl1271_spi_read32(wl, 363 interrupt = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
368 ACX_REG_INTERRUPT_NO_CLEAR);
369 364
370 if (interrupt == 0xffffffff) { 365 if (interrupt == 0xffffffff) {
371 wl1271_error("error reading hardware complete " 366 wl1271_error("error reading hardware complete "
@@ -374,8 +369,8 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
374 } 369 }
375 /* check that ACX_INTR_INIT_COMPLETE is enabled */ 370 /* check that ACX_INTR_INIT_COMPLETE is enabled */
376 else if (interrupt & WL1271_ACX_INTR_INIT_COMPLETE) { 371 else if (interrupt & WL1271_ACX_INTR_INIT_COMPLETE) {
377 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_ACK, 372 wl1271_write32(wl, ACX_REG_INTERRUPT_ACK,
378 WL1271_ACX_INTR_INIT_COMPLETE); 373 WL1271_ACX_INTR_INIT_COMPLETE);
379 break; 374 break;
380 } 375 }
381 } 376 }
@@ -387,10 +382,10 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
387 } 382 }
388 383
389 /* get hardware config command mail box */ 384 /* get hardware config command mail box */
390 wl->cmd_box_addr = wl1271_spi_read32(wl, REG_COMMAND_MAILBOX_PTR); 385 wl->cmd_box_addr = wl1271_read32(wl, REG_COMMAND_MAILBOX_PTR);
391 386
392 /* get hardware config event mail box */ 387 /* get hardware config event mail box */
393 wl->event_box_addr = wl1271_spi_read32(wl, REG_EVENT_MAILBOX_PTR); 388 wl->event_box_addr = wl1271_read32(wl, REG_EVENT_MAILBOX_PTR);
394 389
395 /* set the working partition to its "running" mode offset */ 390 /* set the working partition to its "running" mode offset */
396 wl1271_set_partition(wl, &part_table[PART_WORK]); 391 wl1271_set_partition(wl, &part_table[PART_WORK]);
@@ -463,9 +458,9 @@ int wl1271_boot(struct wl1271 *wl)
463 wl1271_top_reg_write(wl, OCP_REG_CLK_POLARITY, val); 458 wl1271_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
464 } 459 }
465 460
466 wl1271_spi_write32(wl, PLL_PARAMETERS, clk); 461 wl1271_write32(wl, PLL_PARAMETERS, clk);
467 462
468 pause = wl1271_spi_read32(wl, PLL_PARAMETERS); 463 pause = wl1271_read32(wl, PLL_PARAMETERS);
469 464
470 wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause); 465 wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause);
471 466
@@ -474,10 +469,10 @@ int wl1271_boot(struct wl1271 *wl)
474 * 0x3ff (magic number ). How does 469 * 0x3ff (magic number ). How does
475 * this work?! */ 470 * this work?! */
476 pause |= WU_COUNTER_PAUSE_VAL; 471 pause |= WU_COUNTER_PAUSE_VAL;
477 wl1271_spi_write32(wl, WU_COUNTER_PAUSE, pause); 472 wl1271_write32(wl, WU_COUNTER_PAUSE, pause);
478 473
479 /* Continue the ELP wake up sequence */ 474 /* Continue the ELP wake up sequence */
480 wl1271_spi_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL); 475 wl1271_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
481 udelay(500); 476 udelay(500);
482 477
483 wl1271_set_partition(wl, &part_table[PART_DRPW]); 478 wl1271_set_partition(wl, &part_table[PART_DRPW]);
@@ -487,18 +482,18 @@ int wl1271_boot(struct wl1271 *wl)
487 before taking DRPw out of reset */ 482 before taking DRPw out of reset */
488 483
489 wl1271_debug(DEBUG_BOOT, "DRPW_SCRATCH_START %08x", DRPW_SCRATCH_START); 484 wl1271_debug(DEBUG_BOOT, "DRPW_SCRATCH_START %08x", DRPW_SCRATCH_START);
490 clk = wl1271_spi_read32(wl, DRPW_SCRATCH_START); 485 clk = wl1271_read32(wl, DRPW_SCRATCH_START);
491 486
492 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk); 487 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
493 488
494 /* 2 */ 489 /* 2 */
495 clk |= (REF_CLOCK << 1) << 4; 490 clk |= (REF_CLOCK << 1) << 4;
496 wl1271_spi_write32(wl, DRPW_SCRATCH_START, clk); 491 wl1271_write32(wl, DRPW_SCRATCH_START, clk);
497 492
498 wl1271_set_partition(wl, &part_table[PART_WORK]); 493 wl1271_set_partition(wl, &part_table[PART_WORK]);
499 494
500 /* Disable interrupts */ 495 /* Disable interrupts */
501 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL); 496 wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
502 497
503 ret = wl1271_boot_soft_reset(wl); 498 ret = wl1271_boot_soft_reset(wl);
504 if (ret < 0) 499 if (ret < 0)
@@ -513,23 +508,22 @@ int wl1271_boot(struct wl1271 *wl)
513 * ACX_EEPROMLESS_IND_REG */ 508 * ACX_EEPROMLESS_IND_REG */
514 wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG"); 509 wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG");
515 510
516 wl1271_spi_write32(wl, ACX_EEPROMLESS_IND_REG, 511 wl1271_write32(wl, ACX_EEPROMLESS_IND_REG, ACX_EEPROMLESS_IND_REG);
517 ACX_EEPROMLESS_IND_REG);
518 512
519 tmp = wl1271_spi_read32(wl, CHIP_ID_B); 513 tmp = wl1271_read32(wl, CHIP_ID_B);
520 514
521 wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp); 515 wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
522 516
523 /* 6. read the EEPROM parameters */ 517 /* 6. read the EEPROM parameters */
524 tmp = wl1271_spi_read32(wl, SCR_PAD2); 518 tmp = wl1271_read32(wl, SCR_PAD2);
525 519
526 ret = wl1271_boot_write_irq_polarity(wl); 520 ret = wl1271_boot_write_irq_polarity(wl);
527 if (ret < 0) 521 if (ret < 0)
528 goto out; 522 goto out;
529 523
530 /* FIXME: Need to check whether this is really what we want */ 524 /* FIXME: Need to check whether this is really what we want */
531 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK, 525 wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
532 WL1271_ACX_ALL_EVENTS_VECTOR); 526 WL1271_ACX_ALL_EVENTS_VECTOR);
533 527
534 /* WL1271: The reference driver skips steps 7 to 10 (jumps directly 528 /* WL1271: The reference driver skips steps 7 to 10 (jumps directly
535 * to upload_fw) */ 529 * to upload_fw) */
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index c3385b3d246c..36a64e06f290 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -30,6 +30,7 @@
30#include "wl1271.h" 30#include "wl1271.h"
31#include "wl1271_reg.h" 31#include "wl1271_reg.h"
32#include "wl1271_spi.h" 32#include "wl1271_spi.h"
33#include "wl1271_io.h"
33#include "wl1271_acx.h" 34#include "wl1271_acx.h"
34#include "wl12xx_80211.h" 35#include "wl12xx_80211.h"
35#include "wl1271_cmd.h" 36#include "wl1271_cmd.h"
@@ -57,13 +58,13 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
57 58
58 WARN_ON(len % 4 != 0); 59 WARN_ON(len % 4 != 0);
59 60
60 wl1271_spi_write(wl, wl->cmd_box_addr, buf, len, false); 61 wl1271_write(wl, wl->cmd_box_addr, buf, len, false);
61 62
62 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD); 63 wl1271_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD);
63 64
64 timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT); 65 timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT);
65 66
66 intr = wl1271_spi_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); 67 intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
67 while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) { 68 while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) {
68 if (time_after(jiffies, timeout)) { 69 if (time_after(jiffies, timeout)) {
69 wl1271_error("command complete timeout"); 70 wl1271_error("command complete timeout");
@@ -73,13 +74,13 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
73 74
74 msleep(1); 75 msleep(1);
75 76
76 intr = wl1271_spi_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); 77 intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
77 } 78 }
78 79
79 /* read back the status code of the command */ 80 /* read back the status code of the command */
80 if (res_len == 0) 81 if (res_len == 0)
81 res_len = sizeof(struct wl1271_cmd_header); 82 res_len = sizeof(struct wl1271_cmd_header);
82 wl1271_spi_read(wl, wl->cmd_box_addr, cmd, res_len, false); 83 wl1271_read(wl, wl->cmd_box_addr, cmd, res_len, false);
83 84
84 status = le16_to_cpu(cmd->status); 85 status = le16_to_cpu(cmd->status);
85 if (status != CMD_STATUS_SUCCESS) { 86 if (status != CMD_STATUS_SUCCESS) {
@@ -87,8 +88,8 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
87 ret = -EIO; 88 ret = -EIO;
88 } 89 }
89 90
90 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_ACK, 91 wl1271_write32(wl, ACX_REG_INTERRUPT_ACK,
91 WL1271_ACX_INTR_CMD_COMPLETE); 92 WL1271_ACX_INTR_CMD_COMPLETE);
92 93
93out: 94out:
94 return ret; 95 return ret;
@@ -191,23 +192,19 @@ static int wl1271_cmd_cal(struct wl1271 *wl)
191int wl1271_cmd_general_parms(struct wl1271 *wl) 192int wl1271_cmd_general_parms(struct wl1271 *wl)
192{ 193{
193 struct wl1271_general_parms_cmd *gen_parms; 194 struct wl1271_general_parms_cmd *gen_parms;
194 struct conf_general_parms *g = &wl->conf.init.genparam;
195 int ret; 195 int ret;
196 196
197 if (!wl->nvs)
198 return -ENODEV;
199
197 gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL); 200 gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
198 if (!gen_parms) 201 if (!gen_parms)
199 return -ENOMEM; 202 return -ENOMEM;
200 203
201 gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM; 204 gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
202 205
203 gen_parms->ref_clk = g->ref_clk; 206 memcpy(gen_parms->params, wl->nvs->general_params,
204 gen_parms->settling_time = g->settling_time; 207 WL1271_NVS_GENERAL_PARAMS_SIZE);
205 gen_parms->clk_valid_on_wakeup = g->clk_valid_on_wakeup;
206 gen_parms->dc2dcmode = g->dc2dcmode;
207 gen_parms->single_dual_band = g->single_dual_band;
208 gen_parms->tx_bip_fem_autodetect = g->tx_bip_fem_autodetect;
209 gen_parms->tx_bip_fem_manufacturer = g->tx_bip_fem_manufacturer;
210 gen_parms->settings = g->settings;
211 208
212 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0); 209 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0);
213 if (ret < 0) 210 if (ret < 0)
@@ -220,8 +217,11 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
220int wl1271_cmd_radio_parms(struct wl1271 *wl) 217int wl1271_cmd_radio_parms(struct wl1271 *wl)
221{ 218{
222 struct wl1271_radio_parms_cmd *radio_parms; 219 struct wl1271_radio_parms_cmd *radio_parms;
223 struct conf_radio_parms *r = &wl->conf.init.radioparam; 220 struct conf_radio_parms *rparam = &wl->conf.init.radioparam;
224 int i, ret; 221 int ret;
222
223 if (!wl->nvs)
224 return -ENODEV;
225 225
226 radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL); 226 radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
227 if (!radio_parms) 227 if (!radio_parms)
@@ -229,60 +229,13 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
229 229
230 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM; 230 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
231 231
232 /* Static radio parameters */ 232 memcpy(radio_parms->stat_radio_params, wl->nvs->stat_radio_params,
233 radio_parms->rx_trace_loss = r->rx_trace_loss; 233 WL1271_NVS_STAT_RADIO_PARAMS_SIZE);
234 radio_parms->tx_trace_loss = r->tx_trace_loss; 234 memcpy(radio_parms->dyn_radio_params,
235 memcpy(radio_parms->rx_rssi_and_proc_compens, 235 wl->nvs->dyn_radio_params[rparam->fem],
236 r->rx_rssi_and_proc_compens, 236 WL1271_NVS_DYN_RADIO_PARAMS_SIZE);
237 CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE); 237
238 238 /* FIXME: current NVS is missing 5GHz parameters */
239 memcpy(radio_parms->rx_trace_loss_5, r->rx_trace_loss_5,
240 CONF_NUMBER_OF_SUB_BANDS_5);
241 memcpy(radio_parms->tx_trace_loss_5, r->tx_trace_loss_5,
242 CONF_NUMBER_OF_SUB_BANDS_5);
243 memcpy(radio_parms->rx_rssi_and_proc_compens_5,
244 r->rx_rssi_and_proc_compens_5,
245 CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE);
246
247 /* Dynamic radio parameters */
248 radio_parms->tx_ref_pd_voltage = cpu_to_le16(r->tx_ref_pd_voltage);
249 radio_parms->tx_ref_power = r->tx_ref_power;
250 radio_parms->tx_offset_db = r->tx_offset_db;
251
252 memcpy(radio_parms->tx_rate_limits_normal, r->tx_rate_limits_normal,
253 CONF_NUMBER_OF_RATE_GROUPS);
254 memcpy(radio_parms->tx_rate_limits_degraded, r->tx_rate_limits_degraded,
255 CONF_NUMBER_OF_RATE_GROUPS);
256
257 memcpy(radio_parms->tx_channel_limits_11b, r->tx_channel_limits_11b,
258 CONF_NUMBER_OF_CHANNELS_2_4);
259 memcpy(radio_parms->tx_channel_limits_ofdm, r->tx_channel_limits_ofdm,
260 CONF_NUMBER_OF_CHANNELS_2_4);
261 memcpy(radio_parms->tx_pdv_rate_offsets, r->tx_pdv_rate_offsets,
262 CONF_NUMBER_OF_RATE_GROUPS);
263 memcpy(radio_parms->tx_ibias, r->tx_ibias, CONF_NUMBER_OF_RATE_GROUPS);
264
265 radio_parms->rx_fem_insertion_loss = r->rx_fem_insertion_loss;
266
267 for (i = 0; i < CONF_NUMBER_OF_SUB_BANDS_5; i++)
268 radio_parms->tx_ref_pd_voltage_5[i] =
269 cpu_to_le16(r->tx_ref_pd_voltage_5[i]);
270 memcpy(radio_parms->tx_ref_power_5, r->tx_ref_power_5,
271 CONF_NUMBER_OF_SUB_BANDS_5);
272 memcpy(radio_parms->tx_offset_db_5, r->tx_offset_db_5,
273 CONF_NUMBER_OF_SUB_BANDS_5);
274 memcpy(radio_parms->tx_rate_limits_normal_5,
275 r->tx_rate_limits_normal_5, CONF_NUMBER_OF_RATE_GROUPS);
276 memcpy(radio_parms->tx_rate_limits_degraded_5,
277 r->tx_rate_limits_degraded_5, CONF_NUMBER_OF_RATE_GROUPS);
278 memcpy(radio_parms->tx_channel_limits_ofdm_5,
279 r->tx_channel_limits_ofdm_5, CONF_NUMBER_OF_CHANNELS_5);
280 memcpy(radio_parms->tx_pdv_rate_offsets_5, r->tx_pdv_rate_offsets_5,
281 CONF_NUMBER_OF_RATE_GROUPS);
282 memcpy(radio_parms->tx_ibias_5, r->tx_ibias_5,
283 CONF_NUMBER_OF_RATE_GROUPS);
284 memcpy(radio_parms->rx_fem_insertion_loss_5,
285 r->rx_fem_insertion_loss_5, CONF_NUMBER_OF_SUB_BANDS_5);
286 239
287 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ", 240 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
288 radio_parms, sizeof(*radio_parms)); 241 radio_parms, sizeof(*radio_parms));
@@ -311,19 +264,6 @@ int wl1271_cmd_join(struct wl1271 *wl)
311 do_cal = false; 264 do_cal = false;
312 } 265 }
313 266
314 /* FIXME: This is a workaround, because with the current stack, we
315 * cannot know when we have disassociated. So, if we have already
316 * joined, we disconnect before joining again. */
317 if (wl->joined) {
318 ret = wl1271_cmd_disconnect(wl);
319 if (ret < 0) {
320 wl1271_error("failed to disconnect before rejoining");
321 goto out;
322 }
323
324 wl->joined = false;
325 }
326
327 join = kzalloc(sizeof(*join), GFP_KERNEL); 267 join = kzalloc(sizeof(*join), GFP_KERNEL);
328 if (!join) { 268 if (!join) {
329 ret = -ENOMEM; 269 ret = -ENOMEM;
@@ -388,8 +328,6 @@ int wl1271_cmd_join(struct wl1271 *wl)
388 goto out_free; 328 goto out_free;
389 } 329 }
390 330
391 wl->joined = true;
392
393 /* 331 /*
394 * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to 332 * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to
395 * simplify locking we just sleep instead, for now 333 * simplify locking we just sleep instead, for now
@@ -487,7 +425,7 @@ int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
487 return 0; 425 return 0;
488} 426}
489 427
490int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable) 428int wl1271_cmd_data_path(struct wl1271 *wl, bool enable)
491{ 429{
492 struct cmd_enabledisable_path *cmd; 430 struct cmd_enabledisable_path *cmd;
493 int ret; 431 int ret;
@@ -501,7 +439,8 @@ int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
501 goto out; 439 goto out;
502 } 440 }
503 441
504 cmd->channel = channel; 442 /* the channel here is only used for calibration, so hardcoded to 1 */
443 cmd->channel = 1;
505 444
506 if (enable) { 445 if (enable) {
507 cmd_rx = CMD_ENABLE_RX; 446 cmd_rx = CMD_ENABLE_RX;
@@ -514,29 +453,29 @@ int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
514 ret = wl1271_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd), 0); 453 ret = wl1271_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd), 0);
515 if (ret < 0) { 454 if (ret < 0) {
516 wl1271_error("rx %s cmd for channel %d failed", 455 wl1271_error("rx %s cmd for channel %d failed",
517 enable ? "start" : "stop", channel); 456 enable ? "start" : "stop", cmd->channel);
518 goto out; 457 goto out;
519 } 458 }
520 459
521 wl1271_debug(DEBUG_BOOT, "rx %s cmd channel %d", 460 wl1271_debug(DEBUG_BOOT, "rx %s cmd channel %d",
522 enable ? "start" : "stop", channel); 461 enable ? "start" : "stop", cmd->channel);
523 462
524 ret = wl1271_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd), 0); 463 ret = wl1271_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd), 0);
525 if (ret < 0) { 464 if (ret < 0) {
526 wl1271_error("tx %s cmd for channel %d failed", 465 wl1271_error("tx %s cmd for channel %d failed",
527 enable ? "start" : "stop", channel); 466 enable ? "start" : "stop", cmd->channel);
528 return ret; 467 return ret;
529 } 468 }
530 469
531 wl1271_debug(DEBUG_BOOT, "tx %s cmd channel %d", 470 wl1271_debug(DEBUG_BOOT, "tx %s cmd channel %d",
532 enable ? "start" : "stop", channel); 471 enable ? "start" : "stop", cmd->channel);
533 472
534out: 473out:
535 kfree(cmd); 474 kfree(cmd);
536 return ret; 475 return ret;
537} 476}
538 477
539int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode) 478int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send)
540{ 479{
541 struct wl1271_cmd_ps_params *ps_params = NULL; 480 struct wl1271_cmd_ps_params *ps_params = NULL;
542 int ret = 0; 481 int ret = 0;
@@ -557,7 +496,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
557 } 496 }
558 497
559 ps_params->ps_mode = ps_mode; 498 ps_params->ps_mode = ps_mode;
560 ps_params->send_null_data = 1; 499 ps_params->send_null_data = send;
561 ps_params->retries = 5; 500 ps_params->retries = 5;
562 ps_params->hang_over_period = 128; 501 ps_params->hang_over_period = 128;
563 ps_params->null_data_rate = cpu_to_le32(1); /* 1 Mbps */ 502 ps_params->null_data_rate = cpu_to_le32(1); /* 1 Mbps */
@@ -636,7 +575,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
636 channels = wl->hw->wiphy->bands[ieee_band]->channels; 575 channels = wl->hw->wiphy->bands[ieee_band]->channels;
637 n_ch = wl->hw->wiphy->bands[ieee_band]->n_channels; 576 n_ch = wl->hw->wiphy->bands[ieee_band]->n_channels;
638 577
639 if (wl->scanning) 578 if (test_bit(WL1271_FLAG_SCANNING, &wl->flags))
640 return -EINVAL; 579 return -EINVAL;
641 580
642 params = kzalloc(sizeof(*params), GFP_KERNEL); 581 params = kzalloc(sizeof(*params), GFP_KERNEL);
@@ -711,7 +650,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
711 650
712 wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params)); 651 wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params));
713 652
714 wl->scanning = true; 653 set_bit(WL1271_FLAG_SCANNING, &wl->flags);
715 if (wl1271_11a_enabled()) { 654 if (wl1271_11a_enabled()) {
716 wl->scan.state = band; 655 wl->scan.state = band;
717 if (band == WL1271_SCAN_BAND_DUAL) { 656 if (band == WL1271_SCAN_BAND_DUAL) {
@@ -729,7 +668,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
729 ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params), 0); 668 ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params), 0);
730 if (ret < 0) { 669 if (ret < 0) {
731 wl1271_error("SCAN failed"); 670 wl1271_error("SCAN failed");
732 wl->scanning = false; 671 clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
733 goto out; 672 goto out;
734 } 673 }
735 674
@@ -1003,7 +942,7 @@ int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
1003 ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0); 942 ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
1004 if (ret < 0) { 943 if (ret < 0) {
1005 wl1271_warning("could not set keys"); 944 wl1271_warning("could not set keys");
1006 goto out; 945 goto out;
1007 } 946 }
1008 947
1009out: 948out:
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index b4fa4acb9229..2dc06c73532b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -37,8 +37,8 @@ int wl1271_cmd_join(struct wl1271 *wl);
37int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer); 37int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
38int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len); 38int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
39int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len); 39int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
40int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable); 40int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
41int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode); 41int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send);
42int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, 42int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
43 size_t len); 43 size_t len);
44int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len, 44int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
@@ -428,67 +428,24 @@ struct wl1271_general_parms_cmd {
428 428
429 struct wl1271_cmd_test_header test; 429 struct wl1271_cmd_test_header test;
430 430
431 u8 ref_clk; 431 u8 params[WL1271_NVS_GENERAL_PARAMS_SIZE];
432 u8 settling_time; 432 s8 reserved[23];
433 u8 clk_valid_on_wakeup;
434 u8 dc2dcmode;
435 u8 single_dual_band;
436
437 u8 tx_bip_fem_autodetect;
438 u8 tx_bip_fem_manufacturer;
439 u8 settings;
440} __attribute__ ((packed)); 433} __attribute__ ((packed));
441 434
435#define WL1271_STAT_RADIO_PARAMS_5_SIZE 29
436#define WL1271_DYN_RADIO_PARAMS_5_SIZE 104
437
442struct wl1271_radio_parms_cmd { 438struct wl1271_radio_parms_cmd {
443 struct wl1271_cmd_header header; 439 struct wl1271_cmd_header header;
444 440
445 struct wl1271_cmd_test_header test; 441 struct wl1271_cmd_test_header test;
446 442
447 /* Static radio parameters */ 443 u8 stat_radio_params[WL1271_NVS_STAT_RADIO_PARAMS_SIZE];
448 /* 2.4GHz */ 444 u8 stat_radio_params_5[WL1271_STAT_RADIO_PARAMS_5_SIZE];
449 u8 rx_trace_loss;
450 u8 tx_trace_loss;
451 s8 rx_rssi_and_proc_compens[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
452
453 /* 5GHz */
454 u8 rx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
455 u8 tx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
456 s8 rx_rssi_and_proc_compens_5[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
457
458 /* Dynamic radio parameters */
459 /* 2.4GHz */
460 __le16 tx_ref_pd_voltage;
461 s8 tx_ref_power;
462 s8 tx_offset_db;
463
464 s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
465 s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
466
467 s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
468 s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
469 s8 tx_pdv_rate_offsets[CONF_NUMBER_OF_RATE_GROUPS];
470
471 u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
472 u8 rx_fem_insertion_loss;
473 445
474 u8 padding2; 446 u8 dyn_radio_params[WL1271_NVS_DYN_RADIO_PARAMS_SIZE];
475 447 u8 reserved;
476 /* 5GHz */ 448 u8 dyn_radio_params_5[WL1271_DYN_RADIO_PARAMS_5_SIZE];
477 __le16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
478 s8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
479 s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
480
481 s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
482 s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
483
484 s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
485 s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
486
487 /* FIXME: this is inconsistent with the types for 2.4GHz */
488 s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
489 s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
490
491 u8 padding3[2];
492} __attribute__ ((packed)); 449} __attribute__ ((packed));
493 450
494struct wl1271_cmd_cal_channel_tune { 451struct wl1271_cmd_cal_channel_tune {
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h
index 565373ede265..6f9e75cc5640 100644
--- a/drivers/net/wireless/wl12xx/wl1271_conf.h
+++ b/drivers/net/wireless/wl12xx/wl1271_conf.h
@@ -258,7 +258,8 @@ struct conf_rx_settings {
258#define CONF_TX_MAX_RATE_CLASSES 8 258#define CONF_TX_MAX_RATE_CLASSES 8
259 259
260#define CONF_TX_RATE_MASK_UNSPECIFIED 0 260#define CONF_TX_RATE_MASK_UNSPECIFIED 0
261#define CONF_TX_RATE_MASK_ALL 0x1eff 261#define CONF_TX_RATE_MASK_BASIC (CONF_HW_BIT_RATE_1MBPS | \
262 CONF_HW_BIT_RATE_2MBPS)
262#define CONF_TX_RATE_RETRY_LIMIT 10 263#define CONF_TX_RATE_RETRY_LIMIT 10
263 264
264struct conf_tx_rate_class { 265struct conf_tx_rate_class {
@@ -722,31 +723,6 @@ struct conf_conn_settings {
722 u8 psm_entry_retries; 723 u8 psm_entry_retries;
723}; 724};
724 725
725#define CONF_SR_ERR_TBL_MAX_VALUES 14
726
727struct conf_mart_reflex_err_table {
728 /*
729 * Length of the error table values table.
730 *
731 * Range: 0 - CONF_SR_ERR_TBL_MAX_VALUES
732 */
733 u8 len;
734
735 /*
736 * Smart Reflex error table upper limit.
737 *
738 * Range: s8
739 */
740 s8 upper_limit;
741
742 /*
743 * Smart Reflex error table values.
744 *
745 * Range: s8
746 */
747 s8 values[CONF_SR_ERR_TBL_MAX_VALUES];
748};
749
750enum { 726enum {
751 CONF_REF_CLK_19_2_E, 727 CONF_REF_CLK_19_2_E,
752 CONF_REF_CLK_26_E, 728 CONF_REF_CLK_26_E,
@@ -759,64 +735,6 @@ enum single_dual_band_enum {
759 CONF_DUAL_BAND 735 CONF_DUAL_BAND
760}; 736};
761 737
762struct conf_general_parms {
763 /*
764 * RF Reference Clock type / speed
765 *
766 * Range: CONF_REF_CLK_*
767 */
768 u8 ref_clk;
769
770 /*
771 * Settling time of the reference clock after boot.
772 *
773 * Range: u8
774 */
775 u8 settling_time;
776
777 /*
778 * Flag defining whether clock is valid on wakeup.
779 *
780 * Range: 0 - not valid on wakeup, 1 - valid on wakeup
781 */
782 u8 clk_valid_on_wakeup;
783
784 /*
785 * DC-to-DC mode.
786 *
787 * Range: Unknown
788 */
789 u8 dc2dcmode;
790
791 /*
792 * Flag defining whether used as single or dual-band.
793 *
794 * Range: CONF_SINGLE_BAND, CONF_DUAL_BAND
795 */
796 u8 single_dual_band;
797
798 /*
799 * TX bip fem autodetect flag.
800 *
801 * Range: Unknown
802 */
803 u8 tx_bip_fem_autodetect;
804
805 /*
806 * TX bip gem manufacturer.
807 *
808 * Range: Unknown
809 */
810 u8 tx_bip_fem_manufacturer;
811
812 /*
813 * Settings flags.
814 *
815 * Range: Unknown
816 */
817 u8 settings;
818};
819
820#define CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE 15 738#define CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE 15
821#define CONF_NUMBER_OF_SUB_BANDS_5 7 739#define CONF_NUMBER_OF_SUB_BANDS_5 7
822#define CONF_NUMBER_OF_RATE_GROUPS 6 740#define CONF_NUMBER_OF_RATE_GROUPS 6
@@ -825,87 +743,43 @@ struct conf_general_parms {
825 743
826struct conf_radio_parms { 744struct conf_radio_parms {
827 /* 745 /*
828 * Static radio parameters for 2.4GHz 746 * FEM parameter set to use
829 *
830 * Range: unknown
831 */
832 u8 rx_trace_loss;
833 u8 tx_trace_loss;
834 s8 rx_rssi_and_proc_compens[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
835
836 /*
837 * Static radio parameters for 5GHz
838 *
839 * Range: unknown
840 */
841 u8 rx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
842 u8 tx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
843 s8 rx_rssi_and_proc_compens_5[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
844
845 /*
846 * Dynamic radio parameters for 2.4GHz
847 * 747 *
848 * Range: unknown 748 * Range: 0 or 1
849 */ 749 */
850 s16 tx_ref_pd_voltage; 750 u8 fem;
851 s8 tx_ref_power; 751};
852 s8 tx_offset_db;
853
854 s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
855 s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
856
857 s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
858 s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
859 s8 tx_pdv_rate_offsets[CONF_NUMBER_OF_RATE_GROUPS];
860
861 u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
862 u8 rx_fem_insertion_loss;
863 752
753struct conf_init_settings {
864 /* 754 /*
865 * Dynamic radio parameters for 5GHz 755 * Configure radio parameters.
866 *
867 * Range: unknown
868 */ 756 */
869 s16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5]; 757 struct conf_radio_parms radioparam;
870 s8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
871 s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
872
873 s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
874 s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
875
876 s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
877 s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
878 758
879 /* FIXME: this is inconsistent with the types for 2.4GHz */
880 s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
881 s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
882}; 759};
883 760
884#define CONF_SR_ERR_TBL_COUNT 3 761struct conf_itrim_settings {
762 /* enable dco itrim */
763 u8 enable;
885 764
886struct conf_init_settings { 765 /* moderation timeout in microsecs from the last TX */
887 /* 766 u32 timeout;
888 * Configure Smart Reflex error table values. 767};
889 */
890 struct conf_mart_reflex_err_table sr_err_tbl[CONF_SR_ERR_TBL_COUNT];
891 768
769struct conf_pm_config_settings {
892 /* 770 /*
893 * Smart Reflex enable flag. 771 * Host clock settling time
894 * 772 *
895 * Range: 1 - Smart Reflex enabled, 0 - Smart Reflex disabled 773 * Range: 0 - 30000 us
896 */
897 u8 sr_enable;
898
899 /*
900 * Configure general parameters.
901 */ 774 */
902 struct conf_general_parms genparam; 775 u32 host_clk_settling_time;
903 776
904 /* 777 /*
905 * Configure radio parameters. 778 * Host fast wakeup support
779 *
780 * Range: true, false
906 */ 781 */
907 struct conf_radio_parms radioparam; 782 bool host_fast_wakeup_support;
908
909}; 783};
910 784
911struct conf_drv_settings { 785struct conf_drv_settings {
@@ -914,6 +788,8 @@ struct conf_drv_settings {
914 struct conf_tx_settings tx; 788 struct conf_tx_settings tx;
915 struct conf_conn_settings conn; 789 struct conf_conn_settings conn;
916 struct conf_init_settings init; 790 struct conf_init_settings init;
791 struct conf_itrim_settings itrim;
792 struct conf_pm_config_settings pm_config;
917}; 793};
918 794
919#endif 795#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.c b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
index c1805e5f8964..8d7588ca68fd 100644
--- a/drivers/net/wireless/wl12xx/wl1271_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
@@ -237,6 +237,64 @@ static const struct file_operations tx_queue_len_ops = {
237 .open = wl1271_open_file_generic, 237 .open = wl1271_open_file_generic,
238}; 238};
239 239
240static ssize_t gpio_power_read(struct file *file, char __user *user_buf,
241 size_t count, loff_t *ppos)
242{
243 struct wl1271 *wl = file->private_data;
244 bool state = test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
245
246 int res;
247 char buf[10];
248
249 res = scnprintf(buf, sizeof(buf), "%d\n", state);
250
251 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
252}
253
254static ssize_t gpio_power_write(struct file *file,
255 const char __user *user_buf,
256 size_t count, loff_t *ppos)
257{
258 struct wl1271 *wl = file->private_data;
259 char buf[10];
260 size_t len;
261 unsigned long value;
262 int ret;
263
264 mutex_lock(&wl->mutex);
265
266 len = min(count, sizeof(buf) - 1);
267 if (copy_from_user(buf, user_buf, len)) {
268 ret = -EFAULT;
269 goto out;
270 }
271 buf[len] = '\0';
272
273 ret = strict_strtoul(buf, 0, &value);
274 if (ret < 0) {
275 wl1271_warning("illegal value in gpio_power");
276 goto out;
277 }
278
279 if (value) {
280 wl->set_power(true);
281 set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
282 } else {
283 wl->set_power(false);
284 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
285 }
286
287out:
288 mutex_unlock(&wl->mutex);
289 return count;
290}
291
292static const struct file_operations gpio_power_ops = {
293 .read = gpio_power_read,
294 .write = gpio_power_write,
295 .open = wl1271_open_file_generic
296};
297
240static void wl1271_debugfs_delete_files(struct wl1271 *wl) 298static void wl1271_debugfs_delete_files(struct wl1271 *wl)
241{ 299{
242 DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow); 300 DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow);
@@ -333,6 +391,8 @@ static void wl1271_debugfs_delete_files(struct wl1271 *wl)
333 DEBUGFS_DEL(tx_queue_len); 391 DEBUGFS_DEL(tx_queue_len);
334 DEBUGFS_DEL(retry_count); 392 DEBUGFS_DEL(retry_count);
335 DEBUGFS_DEL(excessive_retries); 393 DEBUGFS_DEL(excessive_retries);
394
395 DEBUGFS_DEL(gpio_power);
336} 396}
337 397
338static int wl1271_debugfs_add_files(struct wl1271 *wl) 398static int wl1271_debugfs_add_files(struct wl1271 *wl)
@@ -434,6 +494,8 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl)
434 DEBUGFS_ADD(retry_count, wl->debugfs.rootdir); 494 DEBUGFS_ADD(retry_count, wl->debugfs.rootdir);
435 DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir); 495 DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir);
436 496
497 DEBUGFS_ADD(gpio_power, wl->debugfs.rootdir);
498
437out: 499out:
438 if (ret < 0) 500 if (ret < 0)
439 wl1271_debugfs_delete_files(wl); 501 wl1271_debugfs_delete_files(wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c
index d13fdd99c85c..7468ef10194b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/wl1271_event.c
@@ -24,6 +24,7 @@
24#include "wl1271.h" 24#include "wl1271.h"
25#include "wl1271_reg.h" 25#include "wl1271_reg.h"
26#include "wl1271_spi.h" 26#include "wl1271_spi.h"
27#include "wl1271_io.h"
27#include "wl1271_event.h" 28#include "wl1271_event.h"
28#include "wl1271_ps.h" 29#include "wl1271_ps.h"
29#include "wl12xx_80211.h" 30#include "wl12xx_80211.h"
@@ -35,7 +36,7 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
35 wl1271_debug(DEBUG_EVENT, "status: 0x%x", 36 wl1271_debug(DEBUG_EVENT, "status: 0x%x",
36 mbox->scheduled_scan_status); 37 mbox->scheduled_scan_status);
37 38
38 if (wl->scanning) { 39 if (test_bit(WL1271_FLAG_SCANNING, &wl->flags)) {
39 if (wl->scan.state == WL1271_SCAN_BAND_DUAL) { 40 if (wl->scan.state == WL1271_SCAN_BAND_DUAL) {
40 wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, 41 wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
41 NULL, size); 42 NULL, size);
@@ -43,7 +44,7 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
43 * to the wl1271_cmd_scan function that we are not 44 * to the wl1271_cmd_scan function that we are not
44 * scanning as it checks that. 45 * scanning as it checks that.
45 */ 46 */
46 wl->scanning = false; 47 clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
47 wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len, 48 wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len,
48 wl->scan.active, 49 wl->scan.active,
49 wl->scan.high_prio, 50 wl->scan.high_prio,
@@ -62,7 +63,7 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
62 mutex_unlock(&wl->mutex); 63 mutex_unlock(&wl->mutex);
63 ieee80211_scan_completed(wl->hw, false); 64 ieee80211_scan_completed(wl->hw, false);
64 mutex_lock(&wl->mutex); 65 mutex_lock(&wl->mutex);
65 wl->scanning = false; 66 clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
66 } 67 }
67 } 68 }
68 return 0; 69 return 0;
@@ -78,25 +79,61 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
78 79
79 switch (mbox->ps_status) { 80 switch (mbox->ps_status) {
80 case EVENT_ENTER_POWER_SAVE_FAIL: 81 case EVENT_ENTER_POWER_SAVE_FAIL:
81 if (!wl->psm) { 82 wl1271_debug(DEBUG_PSM, "PSM entry failed");
83
84 if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
85 /* remain in active mode */
82 wl->psm_entry_retry = 0; 86 wl->psm_entry_retry = 0;
83 break; 87 break;
84 } 88 }
85 89
86 if (wl->psm_entry_retry < wl->conf.conn.psm_entry_retries) { 90 if (wl->psm_entry_retry < wl->conf.conn.psm_entry_retries) {
87 wl->psm_entry_retry++; 91 wl->psm_entry_retry++;
88 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE); 92 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
93 true);
89 } else { 94 } else {
90 wl1271_error("PSM entry failed, giving up.\n"); 95 wl1271_error("PSM entry failed, giving up.\n");
96 /* FIXME: this may need to be reconsidered. for now it
97 is not possible to indicate to the mac80211
98 afterwards that PSM entry failed. To maximize
99 functionality (receiving data and remaining
100 associated) make sure that we are in sync with the
101 AP in regard of PSM mode. */
102 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
103 false);
91 wl->psm_entry_retry = 0; 104 wl->psm_entry_retry = 0;
92 *beacon_loss = true;
93 } 105 }
94 break; 106 break;
95 case EVENT_ENTER_POWER_SAVE_SUCCESS: 107 case EVENT_ENTER_POWER_SAVE_SUCCESS:
96 wl->psm_entry_retry = 0; 108 wl->psm_entry_retry = 0;
109
110 /* enable beacon filtering */
111 ret = wl1271_acx_beacon_filter_opt(wl, true);
112 if (ret < 0)
113 break;
114
115 /* enable beacon early termination */
116 ret = wl1271_acx_bet_enable(wl, true);
117 if (ret < 0)
118 break;
119
120 /* go to extremely low power mode */
121 wl1271_ps_elp_sleep(wl);
122 if (ret < 0)
123 break;
97 break; 124 break;
98 case EVENT_EXIT_POWER_SAVE_FAIL: 125 case EVENT_EXIT_POWER_SAVE_FAIL:
99 wl1271_info("PSM exit failed"); 126 wl1271_debug(DEBUG_PSM, "PSM exit failed");
127
128 if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
129 wl->psm_entry_retry = 0;
130 break;
131 }
132
133 /* make sure the firmware goes to active mode - the frame to
134 be sent next will indicate to the AP, that we are active. */
135 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
136 false);
100 break; 137 break;
101 case EVENT_EXIT_POWER_SAVE_SUCCESS: 138 case EVENT_EXIT_POWER_SAVE_SUCCESS:
102 default: 139 default:
@@ -136,7 +173,8 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
136 * filtering) is enabled. Without PSM, the stack will receive all 173 * filtering) is enabled. Without PSM, the stack will receive all
137 * beacons and can detect beacon loss by itself. 174 * beacons and can detect beacon loss by itself.
138 */ 175 */
139 if (vector & BSS_LOSE_EVENT_ID && wl->psm) { 176 if (vector & BSS_LOSE_EVENT_ID &&
177 test_bit(WL1271_FLAG_PSM, &wl->flags)) {
140 wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT"); 178 wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");
141 179
142 /* indicate to the stack, that beacons have been lost */ 180 /* indicate to the stack, that beacons have been lost */
@@ -150,7 +188,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
150 return ret; 188 return ret;
151 } 189 }
152 190
153 if (beacon_loss) { 191 if (wl->vif && beacon_loss) {
154 /* Obviously, it's dangerous to release the mutex while 192 /* Obviously, it's dangerous to release the mutex while
155 we are holding many of the variables in the wl struct. 193 we are holding many of the variables in the wl struct.
156 That's why it's done last in the function, and care must 194 That's why it's done last in the function, and care must
@@ -177,14 +215,14 @@ int wl1271_event_unmask(struct wl1271 *wl)
177 215
178void wl1271_event_mbox_config(struct wl1271 *wl) 216void wl1271_event_mbox_config(struct wl1271 *wl)
179{ 217{
180 wl->mbox_ptr[0] = wl1271_spi_read32(wl, REG_EVENT_MAILBOX_PTR); 218 wl->mbox_ptr[0] = wl1271_read32(wl, REG_EVENT_MAILBOX_PTR);
181 wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox); 219 wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
182 220
183 wl1271_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x", 221 wl1271_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x",
184 wl->mbox_ptr[0], wl->mbox_ptr[1]); 222 wl->mbox_ptr[0], wl->mbox_ptr[1]);
185} 223}
186 224
187int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack) 225int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
188{ 226{
189 struct event_mailbox mbox; 227 struct event_mailbox mbox;
190 int ret; 228 int ret;
@@ -195,8 +233,8 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack)
195 return -EINVAL; 233 return -EINVAL;
196 234
197 /* first we read the mbox descriptor */ 235 /* first we read the mbox descriptor */
198 wl1271_spi_read(wl, wl->mbox_ptr[mbox_num], &mbox, 236 wl1271_read(wl, wl->mbox_ptr[mbox_num], &mbox,
199 sizeof(struct event_mailbox), false); 237 sizeof(struct event_mailbox), false);
200 238
201 /* process the descriptor */ 239 /* process the descriptor */
202 ret = wl1271_event_process(wl, &mbox); 240 ret = wl1271_event_process(wl, &mbox);
@@ -204,9 +242,7 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack)
204 return ret; 242 return ret;
205 243
206 /* then we let the firmware know it can go on...*/ 244 /* then we let the firmware know it can go on...*/
207 if (do_ack) 245 wl1271_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK);
208 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG,
209 INTR_TRIG_EVENT_ACK);
210 246
211 return 0; 247 return 0;
212} 248}
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h
index 4e3f55ebb1a8..278f9206aa56 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/wl1271_event.h
@@ -112,6 +112,6 @@ struct event_mailbox {
112 112
113int wl1271_event_unmask(struct wl1271 *wl); 113int wl1271_event_unmask(struct wl1271 *wl);
114void wl1271_event_mbox_config(struct wl1271 *wl); 114void wl1271_event_mbox_config(struct wl1271 *wl);
115int wl1271_event_handle(struct wl1271 *wl, u8 mbox, bool do_ack); 115int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
116 116
117#endif 117#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/wl1271_init.c
index 11249b436cf1..86c30a86a456 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.c
+++ b/drivers/net/wireless/wl12xx/wl1271_init.c
@@ -49,7 +49,7 @@ static int wl1271_init_hwenc_config(struct wl1271 *wl)
49 return 0; 49 return 0;
50} 50}
51 51
52static int wl1271_init_templates_config(struct wl1271 *wl) 52int wl1271_init_templates_config(struct wl1271 *wl)
53{ 53{
54 int ret; 54 int ret;
55 55
@@ -113,7 +113,7 @@ static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter)
113 return 0; 113 return 0;
114} 114}
115 115
116static int wl1271_init_phy_config(struct wl1271 *wl) 116int wl1271_init_phy_config(struct wl1271 *wl)
117{ 117{
118 int ret; 118 int ret;
119 119
@@ -156,7 +156,7 @@ static int wl1271_init_beacon_filter(struct wl1271 *wl)
156 return 0; 156 return 0;
157} 157}
158 158
159static int wl1271_init_pta(struct wl1271 *wl) 159int wl1271_init_pta(struct wl1271 *wl)
160{ 160{
161 int ret; 161 int ret;
162 162
@@ -171,7 +171,7 @@ static int wl1271_init_pta(struct wl1271 *wl)
171 return 0; 171 return 0;
172} 172}
173 173
174static int wl1271_init_energy_detection(struct wl1271 *wl) 174int wl1271_init_energy_detection(struct wl1271 *wl)
175{ 175{
176 int ret; 176 int ret;
177 177
@@ -195,7 +195,9 @@ static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
195 195
196int wl1271_hw_init(struct wl1271 *wl) 196int wl1271_hw_init(struct wl1271 *wl)
197{ 197{
198 int ret; 198 struct conf_tx_ac_category *conf_ac;
199 struct conf_tx_tid *conf_tid;
200 int ret, i;
199 201
200 ret = wl1271_cmd_general_parms(wl); 202 ret = wl1271_cmd_general_parms(wl);
201 if (ret < 0) 203 if (ret < 0)
@@ -229,6 +231,10 @@ int wl1271_hw_init(struct wl1271 *wl)
229 if (ret < 0) 231 if (ret < 0)
230 goto out_free_memmap; 232 goto out_free_memmap;
231 233
234 ret = wl1271_acx_dco_itrim_params(wl);
235 if (ret < 0)
236 goto out_free_memmap;
237
232 /* Initialize connection monitoring thresholds */ 238 /* Initialize connection monitoring thresholds */
233 ret = wl1271_acx_conn_monit_params(wl); 239 ret = wl1271_acx_conn_monit_params(wl);
234 if (ret < 0) 240 if (ret < 0)
@@ -270,22 +276,36 @@ int wl1271_hw_init(struct wl1271 *wl)
270 goto out_free_memmap; 276 goto out_free_memmap;
271 277
272 /* Default TID configuration */ 278 /* Default TID configuration */
273 ret = wl1271_acx_tid_cfg(wl); 279 for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
274 if (ret < 0) 280 conf_tid = &wl->conf.tx.tid_conf[i];
275 goto out_free_memmap; 281 ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
282 conf_tid->channel_type,
283 conf_tid->tsid,
284 conf_tid->ps_scheme,
285 conf_tid->ack_policy,
286 conf_tid->apsd_conf[0],
287 conf_tid->apsd_conf[1]);
288 if (ret < 0)
289 goto out_free_memmap;
290 }
276 291
277 /* Default AC configuration */ 292 /* Default AC configuration */
278 ret = wl1271_acx_ac_cfg(wl); 293 for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
279 if (ret < 0) 294 conf_ac = &wl->conf.tx.ac_conf[i];
280 goto out_free_memmap; 295 ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
296 conf_ac->cw_max, conf_ac->aifsn,
297 conf_ac->tx_op_limit);
298 if (ret < 0)
299 goto out_free_memmap;
300 }
281 301
282 /* Configure TX rate classes */ 302 /* Configure TX rate classes */
283 ret = wl1271_acx_rate_policies(wl, CONF_TX_RATE_MASK_ALL); 303 ret = wl1271_acx_rate_policies(wl);
284 if (ret < 0) 304 if (ret < 0)
285 goto out_free_memmap; 305 goto out_free_memmap;
286 306
287 /* Enable data path */ 307 /* Enable data path */
288 ret = wl1271_cmd_data_path(wl, wl->channel, 1); 308 ret = wl1271_cmd_data_path(wl, 1);
289 if (ret < 0) 309 if (ret < 0)
290 goto out_free_memmap; 310 goto out_free_memmap;
291 311
@@ -299,8 +319,8 @@ int wl1271_hw_init(struct wl1271 *wl)
299 if (ret < 0) 319 if (ret < 0)
300 goto out_free_memmap; 320 goto out_free_memmap;
301 321
302 /* Configure smart reflex */ 322 /* configure PM */
303 ret = wl1271_acx_smart_reflex(wl); 323 ret = wl1271_acx_pm_config(wl);
304 if (ret < 0) 324 if (ret < 0)
305 goto out_free_memmap; 325 goto out_free_memmap;
306 326
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.h b/drivers/net/wireless/wl12xx/wl1271_init.h
index 930677fbe852..bc26f8c53b91 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.h
+++ b/drivers/net/wireless/wl12xx/wl1271_init.h
@@ -27,6 +27,10 @@
27#include "wl1271.h" 27#include "wl1271.h"
28 28
29int wl1271_hw_init_power_auth(struct wl1271 *wl); 29int wl1271_hw_init_power_auth(struct wl1271 *wl);
30int wl1271_init_templates_config(struct wl1271 *wl);
31int wl1271_init_phy_config(struct wl1271 *wl);
32int wl1271_init_pta(struct wl1271 *wl);
33int wl1271_init_energy_detection(struct wl1271 *wl);
30int wl1271_hw_init(struct wl1271 *wl); 34int wl1271_hw_init(struct wl1271 *wl);
31 35
32#endif 36#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.c b/drivers/net/wireless/wl12xx/wl1271_io.c
new file mode 100644
index 000000000000..5cd94d5666c2
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_io.c
@@ -0,0 +1,213 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2008-2010 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include <linux/module.h>
25#include <linux/platform_device.h>
26#include <linux/crc7.h>
27#include <linux/spi/spi.h>
28
29#include "wl1271.h"
30#include "wl12xx_80211.h"
31#include "wl1271_spi.h"
32#include "wl1271_io.h"
33
34static int wl1271_translate_addr(struct wl1271 *wl, int addr)
35{
36 /*
37 * To translate, first check to which window of addresses the
38 * particular address belongs. Then subtract the starting address
39 * of that window from the address. Then, add offset of the
40 * translated region.
41 *
42 * The translated regions occur next to each other in physical device
43 * memory, so just add the sizes of the preceeding address regions to
44 * get the offset to the new region.
45 *
46 * Currently, only the two first regions are addressed, and the
47 * assumption is that all addresses will fall into either of those
48 * two.
49 */
50 if ((addr >= wl->part.reg.start) &&
51 (addr < wl->part.reg.start + wl->part.reg.size))
52 return addr - wl->part.reg.start + wl->part.mem.size;
53 else
54 return addr - wl->part.mem.start;
55}
56
57/* Set the SPI partitions to access the chip addresses
58 *
59 * To simplify driver code, a fixed (virtual) memory map is defined for
60 * register and memory addresses. Because in the chipset, in different stages
61 * of operation, those addresses will move around, an address translation
62 * mechanism is required.
63 *
64 * There are four partitions (three memory and one register partition),
65 * which are mapped to two different areas of the hardware memory.
66 *
67 * Virtual address
68 * space
69 *
70 * | |
71 * ...+----+--> mem.start
72 * Physical address ... | |
73 * space ... | | [PART_0]
74 * ... | |
75 * 00000000 <--+----+... ...+----+--> mem.start + mem.size
76 * | | ... | |
77 * |MEM | ... | |
78 * | | ... | |
79 * mem.size <--+----+... | | {unused area)
80 * | | ... | |
81 * |REG | ... | |
82 * mem.size | | ... | |
83 * + <--+----+... ...+----+--> reg.start
84 * reg.size | | ... | |
85 * |MEM2| ... | | [PART_1]
86 * | | ... | |
87 * ...+----+--> reg.start + reg.size
88 * | |
89 *
90 */
91int wl1271_set_partition(struct wl1271 *wl,
92 struct wl1271_partition_set *p)
93{
94 /* copy partition info */
95 memcpy(&wl->part, p, sizeof(*p));
96
97 wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
98 p->mem.start, p->mem.size);
99 wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
100 p->reg.start, p->reg.size);
101 wl1271_debug(DEBUG_SPI, "mem2_start %08X mem2_size %08X",
102 p->mem2.start, p->mem2.size);
103 wl1271_debug(DEBUG_SPI, "mem3_start %08X mem3_size %08X",
104 p->mem3.start, p->mem3.size);
105
106 /* write partition info to the chipset */
107 wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
108 wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
109 wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
110 wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
111 wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
112 wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
113 wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
114
115 return 0;
116}
117
118void wl1271_io_reset(struct wl1271 *wl)
119{
120 wl1271_spi_reset(wl);
121}
122
123void wl1271_io_init(struct wl1271 *wl)
124{
125 wl1271_spi_init(wl);
126}
127
128void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
129 size_t len, bool fixed)
130{
131 wl1271_spi_raw_write(wl, addr, buf, len, fixed);
132}
133
134void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
135 size_t len, bool fixed)
136{
137 wl1271_spi_raw_read(wl, addr, buf, len, fixed);
138}
139
140void wl1271_read(struct wl1271 *wl, int addr, void *buf, size_t len,
141 bool fixed)
142{
143 int physical;
144
145 physical = wl1271_translate_addr(wl, addr);
146
147 wl1271_spi_raw_read(wl, physical, buf, len, fixed);
148}
149
150void wl1271_write(struct wl1271 *wl, int addr, void *buf, size_t len,
151 bool fixed)
152{
153 int physical;
154
155 physical = wl1271_translate_addr(wl, addr);
156
157 wl1271_spi_raw_write(wl, physical, buf, len, fixed);
158}
159
160u32 wl1271_read32(struct wl1271 *wl, int addr)
161{
162 return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
163}
164
165void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
166{
167 wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
168}
169
170void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
171{
172 /* write address >> 1 + 0x30000 to OCP_POR_CTR */
173 addr = (addr >> 1) + 0x30000;
174 wl1271_write32(wl, OCP_POR_CTR, addr);
175
176 /* write value to OCP_POR_WDATA */
177 wl1271_write32(wl, OCP_DATA_WRITE, val);
178
179 /* write 1 to OCP_CMD */
180 wl1271_write32(wl, OCP_CMD, OCP_CMD_WRITE);
181}
182
183u16 wl1271_top_reg_read(struct wl1271 *wl, int addr)
184{
185 u32 val;
186 int timeout = OCP_CMD_LOOP;
187
188 /* write address >> 1 + 0x30000 to OCP_POR_CTR */
189 addr = (addr >> 1) + 0x30000;
190 wl1271_write32(wl, OCP_POR_CTR, addr);
191
192 /* write 2 to OCP_CMD */
193 wl1271_write32(wl, OCP_CMD, OCP_CMD_READ);
194
195 /* poll for data ready */
196 do {
197 val = wl1271_read32(wl, OCP_DATA_READ);
198 } while (!(val & OCP_READY_MASK) && --timeout);
199
200 if (!timeout) {
201 wl1271_warning("Top register access timed out.");
202 return 0xffff;
203 }
204
205 /* check data status and return if OK */
206 if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK)
207 return val & 0xffff;
208 else {
209 wl1271_warning("Top register access returned error.");
210 return 0xffff;
211 }
212}
213
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.h b/drivers/net/wireless/wl12xx/wl1271_io.h
new file mode 100644
index 000000000000..fa9a0b35788f
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_io.h
@@ -0,0 +1,68 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 1998-2009 Texas Instruments. All rights reserved.
5 * Copyright (C) 2008-2010 Nokia Corporation
6 *
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
22 *
23 */
24
25#ifndef __WL1271_IO_H__
26#define __WL1271_IO_H__
27
28struct wl1271;
29
30void wl1271_io_reset(struct wl1271 *wl);
31void wl1271_io_init(struct wl1271 *wl);
32
33/* Raw target IO, address is not translated */
34void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
35 size_t len, bool fixed);
36void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
37 size_t len, bool fixed);
38
39/* Translated target IO */
40void wl1271_read(struct wl1271 *wl, int addr, void *buf, size_t len,
41 bool fixed);
42void wl1271_write(struct wl1271 *wl, int addr, void *buf, size_t len,
43 bool fixed);
44u32 wl1271_read32(struct wl1271 *wl, int addr);
45void wl1271_write32(struct wl1271 *wl, int addr, u32 val);
46
47/* Top Register IO */
48void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
49u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
50
51int wl1271_set_partition(struct wl1271 *wl,
52 struct wl1271_partition_set *p);
53
54static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
55{
56 wl1271_raw_read(wl, addr, &wl->buffer_32,
57 sizeof(wl->buffer_32), false);
58
59 return wl->buffer_32;
60}
61
62static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val)
63{
64 wl->buffer_32 = val;
65 wl1271_raw_write(wl, addr, &wl->buffer_32,
66 sizeof(wl->buffer_32), false);
67}
68#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index b62c00ff42fe..2a864b24291d 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * This file is part of wl1271 2 * This file is part of wl1271
3 * 3 *
4 * Copyright (C) 2008-2009 Nokia Corporation 4 * Copyright (C) 2008-2010 Nokia Corporation
5 * 5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com> 6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 * 7 *
@@ -38,6 +38,7 @@
38#include "wl12xx_80211.h" 38#include "wl12xx_80211.h"
39#include "wl1271_reg.h" 39#include "wl1271_reg.h"
40#include "wl1271_spi.h" 40#include "wl1271_spi.h"
41#include "wl1271_io.h"
41#include "wl1271_event.h" 42#include "wl1271_event.h"
42#include "wl1271_tx.h" 43#include "wl1271_tx.h"
43#include "wl1271_rx.h" 44#include "wl1271_rx.h"
@@ -46,6 +47,9 @@
46#include "wl1271_debugfs.h" 47#include "wl1271_debugfs.h"
47#include "wl1271_cmd.h" 48#include "wl1271_cmd.h"
48#include "wl1271_boot.h" 49#include "wl1271_boot.h"
50#include "wl1271_testmode.h"
51
52#define WL1271_BOOT_RETRIES 3
49 53
50static struct conf_drv_settings default_conf = { 54static struct conf_drv_settings default_conf = {
51 .sg = { 55 .sg = {
@@ -67,16 +71,17 @@ static struct conf_drv_settings default_conf = {
67 .ps_poll_timeout = 15, 71 .ps_poll_timeout = 15,
68 .upsd_timeout = 15, 72 .upsd_timeout = 15,
69 .rts_threshold = 2347, 73 .rts_threshold = 2347,
70 .rx_cca_threshold = 0xFFEF, 74 .rx_cca_threshold = 0,
71 .irq_blk_threshold = 0, 75 .irq_blk_threshold = 0xFFFF,
72 .irq_pkt_threshold = USHORT_MAX, 76 .irq_pkt_threshold = 0,
73 .irq_timeout = 5, 77 .irq_timeout = 600,
74 .queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY, 78 .queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
75 }, 79 },
76 .tx = { 80 .tx = {
77 .tx_energy_detection = 0, 81 .tx_energy_detection = 0,
78 .rc_conf = { 82 .rc_conf = {
79 .enabled_rates = CONF_TX_RATE_MASK_UNSPECIFIED, 83 .enabled_rates = CONF_HW_BIT_RATE_1MBPS |
84 CONF_HW_BIT_RATE_2MBPS,
80 .short_retry_limit = 10, 85 .short_retry_limit = 10,
81 .long_retry_limit = 10, 86 .long_retry_limit = 10,
82 .aflags = 0 87 .aflags = 0
@@ -172,8 +177,8 @@ static struct conf_drv_settings default_conf = {
172 } 177 }
173 }, 178 },
174 .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD, 179 .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
175 .tx_compl_timeout = 5, 180 .tx_compl_timeout = 700,
176 .tx_compl_threshold = 5 181 .tx_compl_threshold = 4
177 }, 182 },
178 .conn = { 183 .conn = {
179 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM, 184 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
@@ -186,12 +191,12 @@ static struct conf_drv_settings default_conf = {
186 .rule = CONF_BCN_RULE_PASS_ON_APPEARANCE, 191 .rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
187 } 192 }
188 }, 193 },
189 .synch_fail_thold = 5, 194 .synch_fail_thold = 10,
190 .bss_lose_timeout = 100, 195 .bss_lose_timeout = 100,
191 .beacon_rx_timeout = 10000, 196 .beacon_rx_timeout = 10000,
192 .broadcast_timeout = 20000, 197 .broadcast_timeout = 20000,
193 .rx_broadcast_in_ps = 1, 198 .rx_broadcast_in_ps = 1,
194 .ps_poll_threshold = 4, 199 .ps_poll_threshold = 20,
195 .sig_trigger_count = 2, 200 .sig_trigger_count = 2,
196 .sig_trigger = { 201 .sig_trigger = {
197 [0] = { 202 [0] = {
@@ -226,97 +231,17 @@ static struct conf_drv_settings default_conf = {
226 .psm_entry_retries = 3 231 .psm_entry_retries = 3
227 }, 232 },
228 .init = { 233 .init = {
229 .sr_err_tbl = {
230 [0] = {
231 .len = 7,
232 .upper_limit = 0x03,
233 .values = {
234 0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
235 0x00 }
236 },
237 [1] = {
238 .len = 7,
239 .upper_limit = 0x03,
240 .values = {
241 0x18, 0x10, 0x05, 0xf6, 0xf0, 0xe8,
242 0x00 }
243 },
244 [2] = {
245 .len = 7,
246 .upper_limit = 0x03,
247 .values = {
248 0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
249 0x00 }
250 }
251 },
252 .sr_enable = 1,
253 .genparam = {
254 .ref_clk = CONF_REF_CLK_38_4_E,
255 .settling_time = 5,
256 .clk_valid_on_wakeup = 0,
257 .dc2dcmode = 0,
258 .single_dual_band = CONF_SINGLE_BAND,
259 .tx_bip_fem_autodetect = 0,
260 .tx_bip_fem_manufacturer = 1,
261 .settings = 1,
262 },
263 .radioparam = { 234 .radioparam = {
264 .rx_trace_loss = 10, 235 .fem = 1,
265 .tx_trace_loss = 10,
266 .rx_rssi_and_proc_compens = {
267 0xec, 0xf6, 0x00, 0x0c, 0x18, 0xf8,
268 0xfc, 0x00, 0x08, 0x10, 0xf0, 0xf8,
269 0x00, 0x0a, 0x14 },
270 .rx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 },
271 .tx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 },
272 .rx_rssi_and_proc_compens_5 = {
273 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
275 0x00, 0x00, 0x00 },
276 .tx_ref_pd_voltage = 0x24e,
277 .tx_ref_power = 0x78,
278 .tx_offset_db = 0x0,
279 .tx_rate_limits_normal = {
280 0x1e, 0x1f, 0x22, 0x24, 0x28, 0x29 },
281 .tx_rate_limits_degraded = {
282 0x1b, 0x1c, 0x1e, 0x20, 0x24, 0x25 },
283 .tx_channel_limits_11b = {
284 0x22, 0x50, 0x50, 0x50, 0x50, 0x50,
285 0x50, 0x50, 0x50, 0x50, 0x22, 0x50,
286 0x22, 0x50 },
287 .tx_channel_limits_ofdm = {
288 0x20, 0x50, 0x50, 0x50, 0x50, 0x50,
289 0x50, 0x50, 0x50, 0x50, 0x20, 0x50,
290 0x20, 0x50 },
291 .tx_pdv_rate_offsets = {
292 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
293 .tx_ibias = {
294 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x27 },
295 .rx_fem_insertion_loss = 0x14,
296 .tx_ref_pd_voltage_5 = {
297 0x0190, 0x01a4, 0x01c3, 0x01d8,
298 0x020a, 0x021c },
299 .tx_ref_power_5 = {
300 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 },
301 .tx_offset_db_5 = {
302 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
303 .tx_rate_limits_normal_5 = {
304 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
305 .tx_rate_limits_degraded_5 = {
306 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
307 .tx_channel_limits_ofdm_5 = {
308 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
309 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
310 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
311 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
312 0x50, 0x50, 0x50 },
313 .tx_pdv_rate_offsets_5 = {
314 0x01, 0x02, 0x02, 0x02, 0x02, 0x00 },
315 .tx_ibias_5 = {
316 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 },
317 .rx_fem_insertion_loss_5 = {
318 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 }
319 } 236 }
237 },
238 .itrim = {
239 .enable = false,
240 .timeout = 50000,
241 },
242 .pm_config = {
243 .host_clk_settling_time = 5000,
244 .host_fast_wakeup_support = false
320 } 245 }
321}; 246};
322 247
@@ -337,15 +262,14 @@ static void wl1271_conf_init(struct wl1271 *wl)
337 262
338 /* apply driver default configuration */ 263 /* apply driver default configuration */
339 memcpy(&wl->conf, &default_conf, sizeof(default_conf)); 264 memcpy(&wl->conf, &default_conf, sizeof(default_conf));
340
341 if (wl1271_11a_enabled())
342 wl->conf.init.genparam.single_dual_band = CONF_DUAL_BAND;
343} 265}
344 266
345 267
346static int wl1271_plt_init(struct wl1271 *wl) 268static int wl1271_plt_init(struct wl1271 *wl)
347{ 269{
348 int ret; 270 struct conf_tx_ac_category *conf_ac;
271 struct conf_tx_tid *conf_tid;
272 int ret, i;
349 273
350 ret = wl1271_cmd_general_parms(wl); 274 ret = wl1271_cmd_general_parms(wl);
351 if (ret < 0) 275 if (ret < 0)
@@ -355,15 +279,89 @@ static int wl1271_plt_init(struct wl1271 *wl)
355 if (ret < 0) 279 if (ret < 0)
356 return ret; 280 return ret;
357 281
358 ret = wl1271_acx_init_mem_config(wl); 282 ret = wl1271_init_templates_config(wl);
359 if (ret < 0) 283 if (ret < 0)
360 return ret; 284 return ret;
361 285
362 ret = wl1271_cmd_data_path(wl, wl->channel, 1); 286 ret = wl1271_acx_init_mem_config(wl);
363 if (ret < 0) 287 if (ret < 0)
364 return ret; 288 return ret;
365 289
290 /* PHY layer config */
291 ret = wl1271_init_phy_config(wl);
292 if (ret < 0)
293 goto out_free_memmap;
294
295 ret = wl1271_acx_dco_itrim_params(wl);
296 if (ret < 0)
297 goto out_free_memmap;
298
299 /* Initialize connection monitoring thresholds */
300 ret = wl1271_acx_conn_monit_params(wl);
301 if (ret < 0)
302 goto out_free_memmap;
303
304 /* Bluetooth WLAN coexistence */
305 ret = wl1271_init_pta(wl);
306 if (ret < 0)
307 goto out_free_memmap;
308
309 /* Energy detection */
310 ret = wl1271_init_energy_detection(wl);
311 if (ret < 0)
312 goto out_free_memmap;
313
314 /* Default fragmentation threshold */
315 ret = wl1271_acx_frag_threshold(wl);
316 if (ret < 0)
317 goto out_free_memmap;
318
319 /* Default TID configuration */
320 for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
321 conf_tid = &wl->conf.tx.tid_conf[i];
322 ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
323 conf_tid->channel_type,
324 conf_tid->tsid,
325 conf_tid->ps_scheme,
326 conf_tid->ack_policy,
327 conf_tid->apsd_conf[0],
328 conf_tid->apsd_conf[1]);
329 if (ret < 0)
330 goto out_free_memmap;
331 }
332
333 /* Default AC configuration */
334 for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
335 conf_ac = &wl->conf.tx.ac_conf[i];
336 ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
337 conf_ac->cw_max, conf_ac->aifsn,
338 conf_ac->tx_op_limit);
339 if (ret < 0)
340 goto out_free_memmap;
341 }
342
343 /* Enable data path */
344 ret = wl1271_cmd_data_path(wl, 1);
345 if (ret < 0)
346 goto out_free_memmap;
347
348 /* Configure for CAM power saving (ie. always active) */
349 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
350 if (ret < 0)
351 goto out_free_memmap;
352
353 /* configure PM */
354 ret = wl1271_acx_pm_config(wl);
355 if (ret < 0)
356 goto out_free_memmap;
357
366 return 0; 358 return 0;
359
360 out_free_memmap:
361 kfree(wl->target_mem_map);
362 wl->target_mem_map = NULL;
363
364 return ret;
367} 365}
368 366
369static void wl1271_disable_interrupts(struct wl1271 *wl) 367static void wl1271_disable_interrupts(struct wl1271 *wl)
@@ -374,11 +372,13 @@ static void wl1271_disable_interrupts(struct wl1271 *wl)
374static void wl1271_power_off(struct wl1271 *wl) 372static void wl1271_power_off(struct wl1271 *wl)
375{ 373{
376 wl->set_power(false); 374 wl->set_power(false);
375 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
377} 376}
378 377
379static void wl1271_power_on(struct wl1271 *wl) 378static void wl1271_power_on(struct wl1271 *wl)
380{ 379{
381 wl->set_power(true); 380 wl->set_power(true);
381 set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
382} 382}
383 383
384static void wl1271_fw_status(struct wl1271 *wl, 384static void wl1271_fw_status(struct wl1271 *wl,
@@ -387,8 +387,7 @@ static void wl1271_fw_status(struct wl1271 *wl,
387 u32 total = 0; 387 u32 total = 0;
388 int i; 388 int i;
389 389
390 wl1271_spi_read(wl, FW_STATUS_ADDR, status, 390 wl1271_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);
391 sizeof(*status), false);
392 391
393 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " 392 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
394 "drv_rx_counter = %d, tx_results_counter = %d)", 393 "drv_rx_counter = %d, tx_results_counter = %d)",
@@ -435,7 +434,7 @@ static void wl1271_irq_work(struct work_struct *work)
435 if (ret < 0) 434 if (ret < 0)
436 goto out; 435 goto out;
437 436
438 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL); 437 wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
439 438
440 wl1271_fw_status(wl, wl->fw_status); 439 wl1271_fw_status(wl, wl->fw_status);
441 intr = le32_to_cpu(wl->fw_status->intr); 440 intr = le32_to_cpu(wl->fw_status->intr);
@@ -447,14 +446,13 @@ static void wl1271_irq_work(struct work_struct *work)
447 intr &= WL1271_INTR_MASK; 446 intr &= WL1271_INTR_MASK;
448 447
449 if (intr & WL1271_ACX_INTR_EVENT_A) { 448 if (intr & WL1271_ACX_INTR_EVENT_A) {
450 bool do_ack = (intr & WL1271_ACX_INTR_EVENT_B) ? false : true;
451 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A"); 449 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
452 wl1271_event_handle(wl, 0, do_ack); 450 wl1271_event_handle(wl, 0);
453 } 451 }
454 452
455 if (intr & WL1271_ACX_INTR_EVENT_B) { 453 if (intr & WL1271_ACX_INTR_EVENT_B) {
456 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B"); 454 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
457 wl1271_event_handle(wl, 1, true); 455 wl1271_event_handle(wl, 1);
458 } 456 }
459 457
460 if (intr & WL1271_ACX_INTR_INIT_COMPLETE) 458 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
@@ -478,8 +476,8 @@ static void wl1271_irq_work(struct work_struct *work)
478 } 476 }
479 477
480out_sleep: 478out_sleep:
481 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK, 479 wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
482 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK)); 480 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
483 wl1271_ps_elp_sleep(wl); 481 wl1271_ps_elp_sleep(wl);
484 482
485out: 483out:
@@ -546,6 +544,40 @@ out:
546 return ret; 544 return ret;
547} 545}
548 546
547static int wl1271_update_mac_addr(struct wl1271 *wl)
548{
549 int ret = 0;
550 u8 *nvs_ptr = (u8 *)wl->nvs->nvs;
551
552 /* get mac address from the NVS */
553 wl->mac_addr[0] = nvs_ptr[11];
554 wl->mac_addr[1] = nvs_ptr[10];
555 wl->mac_addr[2] = nvs_ptr[6];
556 wl->mac_addr[3] = nvs_ptr[5];
557 wl->mac_addr[4] = nvs_ptr[4];
558 wl->mac_addr[5] = nvs_ptr[3];
559
560 /* FIXME: if it is a zero-address, we should bail out. Now, instead,
561 we randomize an address */
562 if (is_zero_ether_addr(wl->mac_addr)) {
563 static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf};
564 memcpy(wl->mac_addr, nokia_oui, 3);
565 get_random_bytes(wl->mac_addr + 3, 3);
566
567 /* update this address to the NVS */
568 nvs_ptr[11] = wl->mac_addr[0];
569 nvs_ptr[10] = wl->mac_addr[1];
570 nvs_ptr[6] = wl->mac_addr[2];
571 nvs_ptr[5] = wl->mac_addr[3];
572 nvs_ptr[4] = wl->mac_addr[4];
573 nvs_ptr[3] = wl->mac_addr[5];
574 }
575
576 SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
577
578 return ret;
579}
580
549static int wl1271_fetch_nvs(struct wl1271 *wl) 581static int wl1271_fetch_nvs(struct wl1271 *wl)
550{ 582{
551 const struct firmware *fw; 583 const struct firmware *fw;
@@ -558,15 +590,14 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
558 return ret; 590 return ret;
559 } 591 }
560 592
561 if (fw->size % 4) { 593 if (fw->size != sizeof(struct wl1271_nvs_file)) {
562 wl1271_error("nvs size is not multiple of 32 bits: %zu", 594 wl1271_error("nvs size is not as expected: %zu != %zu",
563 fw->size); 595 fw->size, sizeof(struct wl1271_nvs_file));
564 ret = -EILSEQ; 596 ret = -EILSEQ;
565 goto out; 597 goto out;
566 } 598 }
567 599
568 wl->nvs_len = fw->size; 600 wl->nvs = kmalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL);
569 wl->nvs = kmalloc(wl->nvs_len, GFP_KERNEL);
570 601
571 if (!wl->nvs) { 602 if (!wl->nvs) {
572 wl1271_error("could not allocate memory for the nvs file"); 603 wl1271_error("could not allocate memory for the nvs file");
@@ -574,9 +605,9 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
574 goto out; 605 goto out;
575 } 606 }
576 607
577 memcpy(wl->nvs, fw->data, wl->nvs_len); 608 memcpy(wl->nvs, fw->data, sizeof(struct wl1271_nvs_file));
578 609
579 ret = 0; 610 ret = wl1271_update_mac_addr(wl);
580 611
581out: 612out:
582 release_firmware(fw); 613 release_firmware(fw);
@@ -614,10 +645,11 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
614 struct wl1271_partition_set partition; 645 struct wl1271_partition_set partition;
615 int ret = 0; 646 int ret = 0;
616 647
648 msleep(WL1271_PRE_POWER_ON_SLEEP);
617 wl1271_power_on(wl); 649 wl1271_power_on(wl);
618 msleep(WL1271_POWER_ON_SLEEP); 650 msleep(WL1271_POWER_ON_SLEEP);
619 wl1271_spi_reset(wl); 651 wl1271_io_reset(wl);
620 wl1271_spi_init(wl); 652 wl1271_io_init(wl);
621 653
622 /* We don't need a real memory partition here, because we only want 654 /* We don't need a real memory partition here, because we only want
623 * to use the registers at this point. */ 655 * to use the registers at this point. */
@@ -632,7 +664,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
632 /* whal_FwCtrl_BootSm() */ 664 /* whal_FwCtrl_BootSm() */
633 665
634 /* 0. read chip id from CHIP_ID */ 666 /* 0. read chip id from CHIP_ID */
635 wl->chip.id = wl1271_spi_read32(wl, CHIP_ID_B); 667 wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
636 668
637 /* 1. check if chip id is valid */ 669 /* 1. check if chip id is valid */
638 670
@@ -643,7 +675,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
643 675
644 ret = wl1271_setup(wl); 676 ret = wl1271_setup(wl);
645 if (ret < 0) 677 if (ret < 0)
646 goto out_power_off; 678 goto out;
647 break; 679 break;
648 case CHIP_ID_1271_PG20: 680 case CHIP_ID_1271_PG20:
649 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)", 681 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
@@ -651,38 +683,34 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
651 683
652 ret = wl1271_setup(wl); 684 ret = wl1271_setup(wl);
653 if (ret < 0) 685 if (ret < 0)
654 goto out_power_off; 686 goto out;
655 break; 687 break;
656 default: 688 default:
657 wl1271_error("unsupported chip id: 0x%x", wl->chip.id); 689 wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
658 ret = -ENODEV; 690 ret = -ENODEV;
659 goto out_power_off; 691 goto out;
660 } 692 }
661 693
662 if (wl->fw == NULL) { 694 if (wl->fw == NULL) {
663 ret = wl1271_fetch_firmware(wl); 695 ret = wl1271_fetch_firmware(wl);
664 if (ret < 0) 696 if (ret < 0)
665 goto out_power_off; 697 goto out;
666 } 698 }
667 699
668 /* No NVS from netlink, try to get it from the filesystem */ 700 /* No NVS from netlink, try to get it from the filesystem */
669 if (wl->nvs == NULL) { 701 if (wl->nvs == NULL) {
670 ret = wl1271_fetch_nvs(wl); 702 ret = wl1271_fetch_nvs(wl);
671 if (ret < 0) 703 if (ret < 0)
672 goto out_power_off; 704 goto out;
673 } 705 }
674 706
675 goto out;
676
677out_power_off:
678 wl1271_power_off(wl);
679
680out: 707out:
681 return ret; 708 return ret;
682} 709}
683 710
684int wl1271_plt_start(struct wl1271 *wl) 711int wl1271_plt_start(struct wl1271 *wl)
685{ 712{
713 int retries = WL1271_BOOT_RETRIES;
686 int ret; 714 int ret;
687 715
688 mutex_lock(&wl->mutex); 716 mutex_lock(&wl->mutex);
@@ -696,35 +724,43 @@ int wl1271_plt_start(struct wl1271 *wl)
696 goto out; 724 goto out;
697 } 725 }
698 726
699 wl->state = WL1271_STATE_PLT; 727 while (retries) {
700 728 retries--;
701 ret = wl1271_chip_wakeup(wl); 729 ret = wl1271_chip_wakeup(wl);
702 if (ret < 0) 730 if (ret < 0)
703 goto out; 731 goto power_off;
704
705 ret = wl1271_boot(wl);
706 if (ret < 0)
707 goto out_power_off;
708
709 wl1271_notice("firmware booted in PLT mode (%s)", wl->chip.fw_ver);
710
711 ret = wl1271_plt_init(wl);
712 if (ret < 0)
713 goto out_irq_disable;
714 732
715 /* Make sure power saving is disabled */ 733 ret = wl1271_boot(wl);
716 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM); 734 if (ret < 0)
717 if (ret < 0) 735 goto power_off;
718 goto out_irq_disable;
719 736
720 goto out; 737 ret = wl1271_plt_init(wl);
738 if (ret < 0)
739 goto irq_disable;
721 740
722out_irq_disable: 741 wl->state = WL1271_STATE_PLT;
723 wl1271_disable_interrupts(wl); 742 wl1271_notice("firmware booted in PLT mode (%s)",
743 wl->chip.fw_ver);
744 goto out;
724 745
725out_power_off: 746irq_disable:
726 wl1271_power_off(wl); 747 wl1271_disable_interrupts(wl);
748 mutex_unlock(&wl->mutex);
749 /* Unlocking the mutex in the middle of handling is
750 inherently unsafe. In this case we deem it safe to do,
751 because we need to let any possibly pending IRQ out of
752 the system (and while we are WL1271_STATE_OFF the IRQ
753 work function will not do anything.) Also, any other
754 possible concurrent operations will fail due to the
755 current state, hence the wl1271 struct should be safe. */
756 cancel_work_sync(&wl->irq_work);
757 mutex_lock(&wl->mutex);
758power_off:
759 wl1271_power_off(wl);
760 }
727 761
762 wl1271_error("firmware boot in PLT mode failed despite %d retries",
763 WL1271_BOOT_RETRIES);
728out: 764out:
729 mutex_unlock(&wl->mutex); 765 mutex_unlock(&wl->mutex);
730 766
@@ -762,7 +798,20 @@ out:
762static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 798static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
763{ 799{
764 struct wl1271 *wl = hw->priv; 800 struct wl1271 *wl = hw->priv;
801 struct ieee80211_conf *conf = &hw->conf;
802 struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
803 struct ieee80211_sta *sta = txinfo->control.sta;
804 unsigned long flags;
805
806 /* peek into the rates configured in the STA entry */
807 spin_lock_irqsave(&wl->wl_lock, flags);
808 if (sta && sta->supp_rates[conf->channel->band] != wl->sta_rate_set) {
809 wl->sta_rate_set = sta->supp_rates[conf->channel->band];
810 set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
811 }
812 spin_unlock_irqrestore(&wl->wl_lock, flags);
765 813
814 /* queue the packet */
766 skb_queue_tail(&wl->tx_queue, skb); 815 skb_queue_tail(&wl->tx_queue, skb);
767 816
768 /* 817 /*
@@ -784,7 +833,7 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
784 * protected. Maybe fix this by removing the stupid 833 * protected. Maybe fix this by removing the stupid
785 * variable altogether and checking the real queue state? 834 * variable altogether and checking the real queue state?
786 */ 835 */
787 wl->tx_queue_stopped = true; 836 set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
788 } 837 }
789 838
790 return NETDEV_TX_OK; 839 return NETDEV_TX_OK;
@@ -880,6 +929,7 @@ static struct notifier_block wl1271_dev_notifier = {
880static int wl1271_op_start(struct ieee80211_hw *hw) 929static int wl1271_op_start(struct ieee80211_hw *hw)
881{ 930{
882 struct wl1271 *wl = hw->priv; 931 struct wl1271 *wl = hw->priv;
932 int retries = WL1271_BOOT_RETRIES;
883 int ret = 0; 933 int ret = 0;
884 934
885 wl1271_debug(DEBUG_MAC80211, "mac80211 start"); 935 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
@@ -893,30 +943,42 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
893 goto out; 943 goto out;
894 } 944 }
895 945
896 ret = wl1271_chip_wakeup(wl); 946 while (retries) {
897 if (ret < 0) 947 retries--;
898 goto out; 948 ret = wl1271_chip_wakeup(wl);
899 949 if (ret < 0)
900 ret = wl1271_boot(wl); 950 goto power_off;
901 if (ret < 0)
902 goto out_power_off;
903
904 ret = wl1271_hw_init(wl);
905 if (ret < 0)
906 goto out_irq_disable;
907
908 wl->state = WL1271_STATE_ON;
909 951
910 wl1271_info("firmware booted (%s)", wl->chip.fw_ver); 952 ret = wl1271_boot(wl);
953 if (ret < 0)
954 goto power_off;
911 955
912 goto out; 956 ret = wl1271_hw_init(wl);
957 if (ret < 0)
958 goto irq_disable;
913 959
914out_irq_disable: 960 wl->state = WL1271_STATE_ON;
915 wl1271_disable_interrupts(wl); 961 wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
962 goto out;
916 963
917out_power_off: 964irq_disable:
918 wl1271_power_off(wl); 965 wl1271_disable_interrupts(wl);
966 mutex_unlock(&wl->mutex);
967 /* Unlocking the mutex in the middle of handling is
968 inherently unsafe. In this case we deem it safe to do,
969 because we need to let any possibly pending IRQ out of
970 the system (and while we are WL1271_STATE_OFF the IRQ
971 work function will not do anything.) Also, any other
972 possible concurrent operations will fail due to the
973 current state, hence the wl1271 struct should be safe. */
974 cancel_work_sync(&wl->irq_work);
975 mutex_lock(&wl->mutex);
976power_off:
977 wl1271_power_off(wl);
978 }
919 979
980 wl1271_error("firmware boot failed despite %d retries",
981 WL1271_BOOT_RETRIES);
920out: 982out:
921 mutex_unlock(&wl->mutex); 983 mutex_unlock(&wl->mutex);
922 984
@@ -944,11 +1006,10 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
944 1006
945 WARN_ON(wl->state != WL1271_STATE_ON); 1007 WARN_ON(wl->state != WL1271_STATE_ON);
946 1008
947 if (wl->scanning) { 1009 if (test_and_clear_bit(WL1271_FLAG_SCANNING, &wl->flags)) {
948 mutex_unlock(&wl->mutex); 1010 mutex_unlock(&wl->mutex);
949 ieee80211_scan_completed(wl->hw, true); 1011 ieee80211_scan_completed(wl->hw, true);
950 mutex_lock(&wl->mutex); 1012 mutex_lock(&wl->mutex);
951 wl->scanning = false;
952 } 1013 }
953 1014
954 wl->state = WL1271_STATE_OFF; 1015 wl->state = WL1271_STATE_OFF;
@@ -973,10 +1034,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
973 wl->band = IEEE80211_BAND_2GHZ; 1034 wl->band = IEEE80211_BAND_2GHZ;
974 1035
975 wl->rx_counter = 0; 1036 wl->rx_counter = 0;
976 wl->elp = false;
977 wl->psm = 0;
978 wl->psm_entry_retry = 0; 1037 wl->psm_entry_retry = 0;
979 wl->tx_queue_stopped = false;
980 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 1038 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
981 wl->tx_blocks_available = 0; 1039 wl->tx_blocks_available = 0;
982 wl->tx_results_count = 0; 1040 wl->tx_results_count = 0;
@@ -986,7 +1044,9 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
986 wl->tx_security_seq_32 = 0; 1044 wl->tx_security_seq_32 = 0;
987 wl->time_offset = 0; 1045 wl->time_offset = 0;
988 wl->session_counter = 0; 1046 wl->session_counter = 0;
989 wl->joined = false; 1047 wl->rate_set = CONF_TX_RATE_MASK_BASIC;
1048 wl->sta_rate_set = 0;
1049 wl->flags = 0;
990 1050
991 for (i = 0; i < NUM_TX_QUEUES; i++) 1051 for (i = 0; i < NUM_TX_QUEUES; i++)
992 wl->tx_blocks_freed[i] = 0; 1052 wl->tx_blocks_freed[i] = 0;
@@ -996,13 +1056,13 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
996} 1056}
997 1057
998static int wl1271_op_add_interface(struct ieee80211_hw *hw, 1058static int wl1271_op_add_interface(struct ieee80211_hw *hw,
999 struct ieee80211_if_init_conf *conf) 1059 struct ieee80211_vif *vif)
1000{ 1060{
1001 struct wl1271 *wl = hw->priv; 1061 struct wl1271 *wl = hw->priv;
1002 int ret = 0; 1062 int ret = 0;
1003 1063
1004 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", 1064 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
1005 conf->type, conf->mac_addr); 1065 vif->type, vif->addr);
1006 1066
1007 mutex_lock(&wl->mutex); 1067 mutex_lock(&wl->mutex);
1008 if (wl->vif) { 1068 if (wl->vif) {
@@ -1010,9 +1070,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
1010 goto out; 1070 goto out;
1011 } 1071 }
1012 1072
1013 wl->vif = conf->vif; 1073 wl->vif = vif;
1014 1074
1015 switch (conf->type) { 1075 switch (vif->type) {
1016 case NL80211_IFTYPE_STATION: 1076 case NL80211_IFTYPE_STATION:
1017 wl->bss_type = BSS_TYPE_STA_BSS; 1077 wl->bss_type = BSS_TYPE_STA_BSS;
1018 break; 1078 break;
@@ -1032,7 +1092,7 @@ out:
1032} 1092}
1033 1093
1034static void wl1271_op_remove_interface(struct ieee80211_hw *hw, 1094static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
1035 struct ieee80211_if_init_conf *conf) 1095 struct ieee80211_vif *vif)
1036{ 1096{
1037 struct wl1271 *wl = hw->priv; 1097 struct wl1271 *wl = hw->priv;
1038 1098
@@ -1109,6 +1169,51 @@ out:
1109} 1169}
1110#endif 1170#endif
1111 1171
1172static int wl1271_join_channel(struct wl1271 *wl, int channel)
1173{
1174 int ret = 0;
1175 /* we need to use a dummy BSSID for now */
1176 static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde,
1177 0xad, 0xbe, 0xef };
1178
1179 /* the dummy join is not required for ad-hoc */
1180 if (wl->bss_type == BSS_TYPE_IBSS)
1181 goto out;
1182
1183 /* disable mac filter, so we hear everything */
1184 wl->rx_config &= ~CFG_BSSID_FILTER_EN;
1185
1186 wl->channel = channel;
1187 memcpy(wl->bssid, dummy_bssid, ETH_ALEN);
1188
1189 ret = wl1271_cmd_join(wl);
1190 if (ret < 0)
1191 goto out;
1192
1193 set_bit(WL1271_FLAG_JOINED, &wl->flags);
1194
1195out:
1196 return ret;
1197}
1198
1199static int wl1271_unjoin_channel(struct wl1271 *wl)
1200{
1201 int ret;
1202
1203 /* to stop listening to a channel, we disconnect */
1204 ret = wl1271_cmd_disconnect(wl);
1205 if (ret < 0)
1206 goto out;
1207
1208 clear_bit(WL1271_FLAG_JOINED, &wl->flags);
1209 wl->channel = 0;
1210 memset(wl->bssid, 0, ETH_ALEN);
1211 wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
1212
1213out:
1214 return ret;
1215}
1216
1112static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) 1217static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1113{ 1218{
1114 struct wl1271 *wl = hw->priv; 1219 struct wl1271 *wl = hw->priv;
@@ -1117,10 +1222,11 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1117 1222
1118 channel = ieee80211_frequency_to_channel(conf->channel->center_freq); 1223 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
1119 1224
1120 wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d", 1225 wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s",
1121 channel, 1226 channel,
1122 conf->flags & IEEE80211_CONF_PS ? "on" : "off", 1227 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
1123 conf->power_level); 1228 conf->power_level,
1229 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use");
1124 1230
1125 mutex_lock(&wl->mutex); 1231 mutex_lock(&wl->mutex);
1126 1232
@@ -1130,35 +1236,55 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1130 if (ret < 0) 1236 if (ret < 0)
1131 goto out; 1237 goto out;
1132 1238
1133 if (channel != wl->channel) { 1239 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
1134 /* 1240 if (conf->flags & IEEE80211_CONF_IDLE &&
1135 * We assume that the stack will configure the right channel 1241 test_bit(WL1271_FLAG_JOINED, &wl->flags))
1136 * before associating, so we don't need to send a join 1242 wl1271_unjoin_channel(wl);
1137 * command here. We will join the right channel when the 1243 else if (!(conf->flags & IEEE80211_CONF_IDLE))
1138 * BSSID changes 1244 wl1271_join_channel(wl, channel);
1139 */ 1245
1140 wl->channel = channel; 1246 if (conf->flags & IEEE80211_CONF_IDLE) {
1247 wl->rate_set = CONF_TX_RATE_MASK_BASIC;
1248 wl->sta_rate_set = 0;
1249 wl1271_acx_rate_policies(wl);
1250 }
1141 } 1251 }
1142 1252
1143 if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) { 1253 /* if the channel changes while joined, join again */
1144 wl1271_info("psm enabled"); 1254 if (channel != wl->channel &&
1255 test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
1256 wl->channel = channel;
1257 /* FIXME: maybe use CMD_CHANNEL_SWITCH for this? */
1258 ret = wl1271_cmd_join(wl);
1259 if (ret < 0)
1260 wl1271_warning("cmd join to update channel failed %d",
1261 ret);
1262 } else
1263 wl->channel = channel;
1145 1264
1146 wl->psm_requested = true; 1265 if (conf->flags & IEEE80211_CONF_PS &&
1266 !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
1267 set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
1147 1268
1148 /* 1269 /*
1149 * We enter PSM only if we're already associated. 1270 * We enter PSM only if we're already associated.
1150 * If we're not, we'll enter it when joining an SSID, 1271 * If we're not, we'll enter it when joining an SSID,
1151 * through the bss_info_changed() hook. 1272 * through the bss_info_changed() hook.
1152 */ 1273 */
1153 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE); 1274 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
1275 wl1271_info("psm enabled");
1276 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
1277 true);
1278 }
1154 } else if (!(conf->flags & IEEE80211_CONF_PS) && 1279 } else if (!(conf->flags & IEEE80211_CONF_PS) &&
1155 wl->psm_requested) { 1280 test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
1156 wl1271_info("psm disabled"); 1281 wl1271_info("psm disabled");
1157 1282
1158 wl->psm_requested = false; 1283 clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
1159 1284
1160 if (wl->psm) 1285 if (test_bit(WL1271_FLAG_PSM, &wl->flags))
1161 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE); 1286 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
1287 true);
1162 } 1288 }
1163 1289
1164 if (conf->power_level != wl->power_level) { 1290 if (conf->power_level != wl->power_level) {
@@ -1350,9 +1476,24 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1350 wl1271_error("Could not add or replace key"); 1476 wl1271_error("Could not add or replace key");
1351 goto out_sleep; 1477 goto out_sleep;
1352 } 1478 }
1479
1480 /* the default WEP key needs to be configured at least once */
1481 if (key_type == KEY_WEP) {
1482 ret = wl1271_cmd_set_default_wep_key(wl,
1483 wl->default_key);
1484 if (ret < 0)
1485 goto out_sleep;
1486 }
1353 break; 1487 break;
1354 1488
1355 case DISABLE_KEY: 1489 case DISABLE_KEY:
1490 /* The wl1271 does not allow to remove unicast keys - they
1491 will be cleared automatically on next CMD_JOIN. Ignore the
1492 request silently, as we dont want the mac80211 to emit
1493 an error message. */
1494 if (!is_broadcast_ether_addr(addr))
1495 break;
1496
1356 ret = wl1271_cmd_set_key(wl, KEY_REMOVE, 1497 ret = wl1271_cmd_set_key(wl, KEY_REMOVE,
1357 key_conf->keyidx, key_type, 1498 key_conf->keyidx, key_type,
1358 key_conf->keylen, key_conf->key, 1499 key_conf->keylen, key_conf->key,
@@ -1440,20 +1581,21 @@ out:
1440 return ret; 1581 return ret;
1441} 1582}
1442 1583
1443static u32 wl1271_enabled_rates_get(struct wl1271 *wl, u64 basic_rate_set) 1584static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *beacon)
1444{ 1585{
1445 struct ieee80211_supported_band *band; 1586 u8 *ptr = beacon->data +
1446 u32 enabled_rates = 0; 1587 offsetof(struct ieee80211_mgmt, u.beacon.variable);
1447 int bit; 1588
1448 1589 /* find the location of the ssid in the beacon */
1449 band = wl->hw->wiphy->bands[wl->band]; 1590 while (ptr < beacon->data + beacon->len) {
1450 for (bit = 0; bit < band->n_bitrates; bit++) { 1591 if (ptr[0] == WLAN_EID_SSID) {
1451 if (basic_rate_set & 0x1) 1592 wl->ssid_len = ptr[1];
1452 enabled_rates |= band->bitrates[bit].hw_value; 1593 memcpy(wl->ssid, ptr+2, wl->ssid_len);
1453 basic_rate_set >>= 1; 1594 return;
1595 }
1596 ptr += ptr[1];
1454 } 1597 }
1455 1598 wl1271_error("ad-hoc beacon template has no SSID!\n");
1456 return enabled_rates;
1457} 1599}
1458 1600
1459static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, 1601static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
@@ -1463,6 +1605,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1463{ 1605{
1464 enum wl1271_cmd_ps_mode mode; 1606 enum wl1271_cmd_ps_mode mode;
1465 struct wl1271 *wl = hw->priv; 1607 struct wl1271 *wl = hw->priv;
1608 bool do_join = false;
1466 int ret; 1609 int ret;
1467 1610
1468 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed"); 1611 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed");
@@ -1473,9 +1616,67 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1473 if (ret < 0) 1616 if (ret < 0)
1474 goto out; 1617 goto out;
1475 1618
1619 if (wl->bss_type == BSS_TYPE_IBSS) {
1620 /* FIXME: This implements rudimentary ad-hoc support -
1621 proper templates are on the wish list and notification
1622 on when they change. This patch will update the templates
1623 on every call to this function. */
1624 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
1625
1626 if (beacon) {
1627 struct ieee80211_hdr *hdr;
1628
1629 wl1271_ssid_set(wl, beacon);
1630 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
1631 beacon->data,
1632 beacon->len);
1633
1634 if (ret < 0) {
1635 dev_kfree_skb(beacon);
1636 goto out_sleep;
1637 }
1638
1639 hdr = (struct ieee80211_hdr *) beacon->data;
1640 hdr->frame_control = cpu_to_le16(
1641 IEEE80211_FTYPE_MGMT |
1642 IEEE80211_STYPE_PROBE_RESP);
1643
1644 ret = wl1271_cmd_template_set(wl,
1645 CMD_TEMPL_PROBE_RESPONSE,
1646 beacon->data,
1647 beacon->len);
1648 dev_kfree_skb(beacon);
1649 if (ret < 0)
1650 goto out_sleep;
1651
1652 /* Need to update the SSID (for filtering etc) */
1653 do_join = true;
1654 }
1655 }
1656
1657 if ((changed & BSS_CHANGED_BSSID) &&
1658 /*
1659 * Now we know the correct bssid, so we send a new join command
1660 * and enable the BSSID filter
1661 */
1662 memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
1663 wl->rx_config |= CFG_BSSID_FILTER_EN;
1664 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
1665 ret = wl1271_cmd_build_null_data(wl);
1666 if (ret < 0) {
1667 wl1271_warning("cmd buld null data failed %d",
1668 ret);
1669 goto out_sleep;
1670 }
1671
1672 /* Need to update the BSSID (for filtering etc) */
1673 do_join = true;
1674 }
1675
1476 if (changed & BSS_CHANGED_ASSOC) { 1676 if (changed & BSS_CHANGED_ASSOC) {
1477 if (bss_conf->assoc) { 1677 if (bss_conf->assoc) {
1478 wl->aid = bss_conf->aid; 1678 wl->aid = bss_conf->aid;
1679 set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
1479 1680
1480 /* 1681 /*
1481 * with wl1271, we don't need to update the 1682 * with wl1271, we don't need to update the
@@ -1492,15 +1693,16 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1492 goto out_sleep; 1693 goto out_sleep;
1493 1694
1494 /* If we want to go in PSM but we're not there yet */ 1695 /* If we want to go in PSM but we're not there yet */
1495 if (wl->psm_requested && !wl->psm) { 1696 if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
1697 !test_bit(WL1271_FLAG_PSM, &wl->flags)) {
1496 mode = STATION_POWER_SAVE_MODE; 1698 mode = STATION_POWER_SAVE_MODE;
1497 ret = wl1271_ps_set_mode(wl, mode); 1699 ret = wl1271_ps_set_mode(wl, mode, true);
1498 if (ret < 0) 1700 if (ret < 0)
1499 goto out_sleep; 1701 goto out_sleep;
1500 } 1702 }
1501 } else { 1703 } else {
1502 /* use defaults when not associated */ 1704 /* use defaults when not associated */
1503 wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET; 1705 clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
1504 wl->aid = 0; 1706 wl->aid = 0;
1505 } 1707 }
1506 1708
@@ -1535,15 +1737,13 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1535 } 1737 }
1536 } 1738 }
1537 1739
1538 if (changed & BSS_CHANGED_BASIC_RATES) { 1740 if (do_join) {
1539 wl->basic_rate_set = wl1271_enabled_rates_get( 1741 ret = wl1271_cmd_join(wl);
1540 wl, bss_conf->basic_rates);
1541
1542 ret = wl1271_acx_rate_policies(wl, wl->basic_rate_set);
1543 if (ret < 0) { 1742 if (ret < 0) {
1544 wl1271_warning("Set rate policies failed %d", ret); 1743 wl1271_warning("cmd join failed %d", ret);
1545 goto out_sleep; 1744 goto out_sleep;
1546 } 1745 }
1746 set_bit(WL1271_FLAG_JOINED, &wl->flags);
1547 } 1747 }
1548 1748
1549out_sleep: 1749out_sleep:
@@ -1553,6 +1753,43 @@ out:
1553 mutex_unlock(&wl->mutex); 1753 mutex_unlock(&wl->mutex);
1554} 1754}
1555 1755
1756static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
1757 const struct ieee80211_tx_queue_params *params)
1758{
1759 struct wl1271 *wl = hw->priv;
1760 int ret;
1761
1762 mutex_lock(&wl->mutex);
1763
1764 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
1765
1766 ret = wl1271_ps_elp_wakeup(wl, false);
1767 if (ret < 0)
1768 goto out;
1769
1770 ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
1771 params->cw_min, params->cw_max,
1772 params->aifs, params->txop);
1773 if (ret < 0)
1774 goto out_sleep;
1775
1776 ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
1777 CONF_CHANNEL_TYPE_EDCF,
1778 wl1271_tx_get_queue(queue),
1779 CONF_PS_SCHEME_LEGACY_PSPOLL,
1780 CONF_ACK_POLICY_LEGACY, 0, 0);
1781 if (ret < 0)
1782 goto out_sleep;
1783
1784out_sleep:
1785 wl1271_ps_elp_sleep(wl);
1786
1787out:
1788 mutex_unlock(&wl->mutex);
1789
1790 return ret;
1791}
1792
1556 1793
1557/* can't be const, mac80211 writes to this */ 1794/* can't be const, mac80211 writes to this */
1558static struct ieee80211_rate wl1271_rates[] = { 1795static struct ieee80211_rate wl1271_rates[] = {
@@ -1599,19 +1836,19 @@ static struct ieee80211_rate wl1271_rates[] = {
1599 1836
1600/* can't be const, mac80211 writes to this */ 1837/* can't be const, mac80211 writes to this */
1601static struct ieee80211_channel wl1271_channels[] = { 1838static struct ieee80211_channel wl1271_channels[] = {
1602 { .hw_value = 1, .center_freq = 2412}, 1839 { .hw_value = 1, .center_freq = 2412, .max_power = 25 },
1603 { .hw_value = 2, .center_freq = 2417}, 1840 { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
1604 { .hw_value = 3, .center_freq = 2422}, 1841 { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
1605 { .hw_value = 4, .center_freq = 2427}, 1842 { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
1606 { .hw_value = 5, .center_freq = 2432}, 1843 { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
1607 { .hw_value = 6, .center_freq = 2437}, 1844 { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
1608 { .hw_value = 7, .center_freq = 2442}, 1845 { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
1609 { .hw_value = 8, .center_freq = 2447}, 1846 { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
1610 { .hw_value = 9, .center_freq = 2452}, 1847 { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
1611 { .hw_value = 10, .center_freq = 2457}, 1848 { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
1612 { .hw_value = 11, .center_freq = 2462}, 1849 { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
1613 { .hw_value = 12, .center_freq = 2467}, 1850 { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
1614 { .hw_value = 13, .center_freq = 2472}, 1851 { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
1615}; 1852};
1616 1853
1617/* can't be const, mac80211 writes to this */ 1854/* can't be const, mac80211 writes to this */
@@ -1718,6 +1955,8 @@ static const struct ieee80211_ops wl1271_ops = {
1718 .hw_scan = wl1271_op_hw_scan, 1955 .hw_scan = wl1271_op_hw_scan,
1719 .bss_info_changed = wl1271_op_bss_info_changed, 1956 .bss_info_changed = wl1271_op_bss_info_changed,
1720 .set_rts_threshold = wl1271_op_set_rts_threshold, 1957 .set_rts_threshold = wl1271_op_set_rts_threshold,
1958 .conf_tx = wl1271_op_conf_tx,
1959 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
1721}; 1960};
1722 1961
1723static int wl1271_register_hw(struct wl1271 *wl) 1962static int wl1271_register_hw(struct wl1271 *wl)
@@ -1757,7 +1996,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
1757 IEEE80211_HW_BEACON_FILTER | 1996 IEEE80211_HW_BEACON_FILTER |
1758 IEEE80211_HW_SUPPORTS_PS; 1997 IEEE80211_HW_SUPPORTS_PS;
1759 1998
1760 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 1999 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
2000 BIT(NL80211_IFTYPE_ADHOC);
1761 wl->hw->wiphy->max_scan_ssids = 1; 2001 wl->hw->wiphy->max_scan_ssids = 1;
1762 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz; 2002 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz;
1763 2003
@@ -1785,24 +2025,17 @@ static struct platform_device wl1271_device = {
1785}; 2025};
1786 2026
1787#define WL1271_DEFAULT_CHANNEL 0 2027#define WL1271_DEFAULT_CHANNEL 0
1788static int __devinit wl1271_probe(struct spi_device *spi) 2028
2029static struct ieee80211_hw *wl1271_alloc_hw(void)
1789{ 2030{
1790 struct wl12xx_platform_data *pdata;
1791 struct ieee80211_hw *hw; 2031 struct ieee80211_hw *hw;
1792 struct wl1271 *wl; 2032 struct wl1271 *wl;
1793 int ret, i; 2033 int i;
1794 static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf};
1795
1796 pdata = spi->dev.platform_data;
1797 if (!pdata) {
1798 wl1271_error("no platform data");
1799 return -ENODEV;
1800 }
1801 2034
1802 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops); 2035 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
1803 if (!hw) { 2036 if (!hw) {
1804 wl1271_error("could not alloc ieee80211_hw"); 2037 wl1271_error("could not alloc ieee80211_hw");
1805 return -ENOMEM; 2038 return ERR_PTR(-ENOMEM);
1806 } 2039 }
1807 2040
1808 wl = hw->priv; 2041 wl = hw->priv;
@@ -1811,44 +2044,80 @@ static int __devinit wl1271_probe(struct spi_device *spi)
1811 INIT_LIST_HEAD(&wl->list); 2044 INIT_LIST_HEAD(&wl->list);
1812 2045
1813 wl->hw = hw; 2046 wl->hw = hw;
1814 dev_set_drvdata(&spi->dev, wl);
1815 wl->spi = spi;
1816 2047
1817 skb_queue_head_init(&wl->tx_queue); 2048 skb_queue_head_init(&wl->tx_queue);
1818 2049
1819 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); 2050 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
1820 wl->channel = WL1271_DEFAULT_CHANNEL; 2051 wl->channel = WL1271_DEFAULT_CHANNEL;
1821 wl->scanning = false;
1822 wl->default_key = 0; 2052 wl->default_key = 0;
1823 wl->rx_counter = 0; 2053 wl->rx_counter = 0;
1824 wl->rx_config = WL1271_DEFAULT_RX_CONFIG; 2054 wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
1825 wl->rx_filter = WL1271_DEFAULT_RX_FILTER; 2055 wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
1826 wl->elp = false;
1827 wl->psm = 0;
1828 wl->psm_requested = false;
1829 wl->psm_entry_retry = 0; 2056 wl->psm_entry_retry = 0;
1830 wl->tx_queue_stopped = false;
1831 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 2057 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1832 wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET; 2058 wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2059 wl->rate_set = CONF_TX_RATE_MASK_BASIC;
2060 wl->sta_rate_set = 0;
1833 wl->band = IEEE80211_BAND_2GHZ; 2061 wl->band = IEEE80211_BAND_2GHZ;
1834 wl->vif = NULL; 2062 wl->vif = NULL;
1835 wl->joined = false; 2063 wl->flags = 0;
1836 2064
1837 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 2065 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
1838 wl->tx_frames[i] = NULL; 2066 wl->tx_frames[i] = NULL;
1839 2067
1840 spin_lock_init(&wl->wl_lock); 2068 spin_lock_init(&wl->wl_lock);
1841 2069
1842 /*
1843 * In case our MAC address is not correctly set,
1844 * we use a random but Nokia MAC.
1845 */
1846 memcpy(wl->mac_addr, nokia_oui, 3);
1847 get_random_bytes(wl->mac_addr + 3, 3);
1848
1849 wl->state = WL1271_STATE_OFF; 2070 wl->state = WL1271_STATE_OFF;
1850 mutex_init(&wl->mutex); 2071 mutex_init(&wl->mutex);
1851 2072
2073 /* Apply default driver configuration. */
2074 wl1271_conf_init(wl);
2075
2076 return hw;
2077}
2078
2079int wl1271_free_hw(struct wl1271 *wl)
2080{
2081 ieee80211_unregister_hw(wl->hw);
2082
2083 wl1271_debugfs_exit(wl);
2084
2085 kfree(wl->target_mem_map);
2086 vfree(wl->fw);
2087 wl->fw = NULL;
2088 kfree(wl->nvs);
2089 wl->nvs = NULL;
2090
2091 kfree(wl->fw_status);
2092 kfree(wl->tx_res_if);
2093
2094 ieee80211_free_hw(wl->hw);
2095
2096 return 0;
2097}
2098
2099static int __devinit wl1271_probe(struct spi_device *spi)
2100{
2101 struct wl12xx_platform_data *pdata;
2102 struct ieee80211_hw *hw;
2103 struct wl1271 *wl;
2104 int ret;
2105
2106 pdata = spi->dev.platform_data;
2107 if (!pdata) {
2108 wl1271_error("no platform data");
2109 return -ENODEV;
2110 }
2111
2112 hw = wl1271_alloc_hw();
2113 if (IS_ERR(hw))
2114 return PTR_ERR(hw);
2115
2116 wl = hw->priv;
2117
2118 dev_set_drvdata(&spi->dev, wl);
2119 wl->spi = spi;
2120
1852 /* This is the only SPI value that we need to set here, the rest 2121 /* This is the only SPI value that we need to set here, the rest
1853 * comes from the board-peripherals file */ 2122 * comes from the board-peripherals file */
1854 spi->bits_per_word = 32; 2123 spi->bits_per_word = 32;
@@ -1890,9 +2159,6 @@ static int __devinit wl1271_probe(struct spi_device *spi)
1890 } 2159 }
1891 dev_set_drvdata(&wl1271_device.dev, wl); 2160 dev_set_drvdata(&wl1271_device.dev, wl);
1892 2161
1893 /* Apply default driver configuration. */
1894 wl1271_conf_init(wl);
1895
1896 ret = wl1271_init_ieee80211(wl); 2162 ret = wl1271_init_ieee80211(wl);
1897 if (ret) 2163 if (ret)
1898 goto out_platform; 2164 goto out_platform;
@@ -1923,21 +2189,10 @@ static int __devexit wl1271_remove(struct spi_device *spi)
1923{ 2189{
1924 struct wl1271 *wl = dev_get_drvdata(&spi->dev); 2190 struct wl1271 *wl = dev_get_drvdata(&spi->dev);
1925 2191
1926 ieee80211_unregister_hw(wl->hw);
1927
1928 wl1271_debugfs_exit(wl);
1929 platform_device_unregister(&wl1271_device); 2192 platform_device_unregister(&wl1271_device);
1930 free_irq(wl->irq, wl); 2193 free_irq(wl->irq, wl);
1931 kfree(wl->target_mem_map);
1932 vfree(wl->fw);
1933 wl->fw = NULL;
1934 kfree(wl->nvs);
1935 wl->nvs = NULL;
1936 2194
1937 kfree(wl->fw_status); 2195 wl1271_free_hw(wl);
1938 kfree(wl->tx_res_if);
1939
1940 ieee80211_free_hw(wl->hw);
1941 2196
1942 return 0; 2197 return 0;
1943} 2198}
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/wl1271_ps.c
index 507cd91d7eed..e2b1ebf096e8 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.c
@@ -24,6 +24,7 @@
24#include "wl1271_reg.h" 24#include "wl1271_reg.h"
25#include "wl1271_ps.h" 25#include "wl1271_ps.h"
26#include "wl1271_spi.h" 26#include "wl1271_spi.h"
27#include "wl1271_io.h"
27 28
28#define WL1271_WAKEUP_TIMEOUT 500 29#define WL1271_WAKEUP_TIMEOUT 500
29 30
@@ -39,12 +40,13 @@ void wl1271_elp_work(struct work_struct *work)
39 40
40 mutex_lock(&wl->mutex); 41 mutex_lock(&wl->mutex);
41 42
42 if (wl->elp || !wl->psm) 43 if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
44 !test_bit(WL1271_FLAG_PSM, &wl->flags))
43 goto out; 45 goto out;
44 46
45 wl1271_debug(DEBUG_PSM, "chip to elp"); 47 wl1271_debug(DEBUG_PSM, "chip to elp");
46 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP); 48 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
47 wl->elp = true; 49 set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
48 50
49out: 51out:
50 mutex_unlock(&wl->mutex); 52 mutex_unlock(&wl->mutex);
@@ -55,7 +57,7 @@ out:
55/* Routines to toggle sleep mode while in ELP */ 57/* Routines to toggle sleep mode while in ELP */
56void wl1271_ps_elp_sleep(struct wl1271 *wl) 58void wl1271_ps_elp_sleep(struct wl1271 *wl)
57{ 59{
58 if (wl->psm) { 60 if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
59 cancel_delayed_work(&wl->elp_work); 61 cancel_delayed_work(&wl->elp_work);
60 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, 62 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
61 msecs_to_jiffies(ELP_ENTRY_DELAY)); 63 msecs_to_jiffies(ELP_ENTRY_DELAY));
@@ -70,7 +72,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
70 u32 start_time = jiffies; 72 u32 start_time = jiffies;
71 bool pending = false; 73 bool pending = false;
72 74
73 if (!wl->elp) 75 if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
74 return 0; 76 return 0;
75 77
76 wl1271_debug(DEBUG_PSM, "waking up chip from elp"); 78 wl1271_debug(DEBUG_PSM, "waking up chip from elp");
@@ -101,7 +103,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
101 } 103 }
102 } 104 }
103 105
104 wl->elp = false; 106 clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
105 107
106 wl1271_debug(DEBUG_PSM, "wakeup time: %u ms", 108 wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
107 jiffies_to_msecs(jiffies - start_time)); 109 jiffies_to_msecs(jiffies - start_time));
@@ -117,7 +119,8 @@ out:
117 return 0; 119 return 0;
118} 120}
119 121
120int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode) 122int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
123 bool send)
121{ 124{
122 int ret; 125 int ret;
123 126
@@ -125,25 +128,11 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
125 case STATION_POWER_SAVE_MODE: 128 case STATION_POWER_SAVE_MODE:
126 wl1271_debug(DEBUG_PSM, "entering psm"); 129 wl1271_debug(DEBUG_PSM, "entering psm");
127 130
128 /* enable beacon filtering */ 131 ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE, send);
129 ret = wl1271_acx_beacon_filter_opt(wl, true);
130 if (ret < 0) 132 if (ret < 0)
131 return ret; 133 return ret;
132 134
133 /* enable beacon early termination */ 135 set_bit(WL1271_FLAG_PSM, &wl->flags);
134 ret = wl1271_acx_bet_enable(wl, true);
135 if (ret < 0)
136 return ret;
137
138 ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
139 if (ret < 0)
140 return ret;
141
142 wl1271_ps_elp_sleep(wl);
143 if (ret < 0)
144 return ret;
145
146 wl->psm = 1;
147 break; 136 break;
148 case STATION_ACTIVE_MODE: 137 case STATION_ACTIVE_MODE:
149 default: 138 default:
@@ -162,11 +151,11 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
162 if (ret < 0) 151 if (ret < 0)
163 return ret; 152 return ret;
164 153
165 ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE); 154 ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE, send);
166 if (ret < 0) 155 if (ret < 0)
167 return ret; 156 return ret;
168 157
169 wl->psm = 0; 158 clear_bit(WL1271_FLAG_PSM, &wl->flags);
170 break; 159 break;
171 } 160 }
172 161
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.h b/drivers/net/wireless/wl12xx/wl1271_ps.h
index 779653d0ae85..940276f517a4 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.h
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.h
@@ -27,7 +27,8 @@
27#include "wl1271.h" 27#include "wl1271.h"
28#include "wl1271_acx.h" 28#include "wl1271_acx.h"
29 29
30int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode); 30int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
31 bool send);
31void wl1271_ps_elp_sleep(struct wl1271 *wl); 32void wl1271_ps_elp_sleep(struct wl1271 *wl);
32int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake); 33int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake);
33void wl1271_elp_work(struct work_struct *work); 34void wl1271_elp_work(struct work_struct *work);
diff --git a/drivers/net/wireless/wl12xx/wl1271_reg.h b/drivers/net/wireless/wl12xx/wl1271_reg.h
index 1f237389d1c7..990960771528 100644
--- a/drivers/net/wireless/wl12xx/wl1271_reg.h
+++ b/drivers/net/wireless/wl12xx/wl1271_reg.h
@@ -62,73 +62,10 @@
62#define WL1271_SLV_REG_DATA (REGISTERS_BASE + 0x0008) 62#define WL1271_SLV_REG_DATA (REGISTERS_BASE + 0x0008)
63#define WL1271_SLV_REG_ADATA (REGISTERS_BASE + 0x000c) 63#define WL1271_SLV_REG_ADATA (REGISTERS_BASE + 0x000c)
64#define WL1271_SLV_MEM_DATA (REGISTERS_BASE + 0x0018) 64#define WL1271_SLV_MEM_DATA (REGISTERS_BASE + 0x0018)
65/*
66 * Interrupt registers.
67 * 64 bit interrupt sources registers ws ced.
68 * sme interupts were removed and new ones were added.
69 * Order was changed.
70 */
71#define FIQ_MASK (REGISTERS_BASE + 0x0400)
72#define FIQ_MASK_L (REGISTERS_BASE + 0x0400)
73#define FIQ_MASK_H (REGISTERS_BASE + 0x0404)
74#define FIQ_MASK_SET (REGISTERS_BASE + 0x0408)
75#define FIQ_MASK_SET_L (REGISTERS_BASE + 0x0408)
76#define FIQ_MASK_SET_H (REGISTERS_BASE + 0x040C)
77#define FIQ_MASK_CLR (REGISTERS_BASE + 0x0410)
78#define FIQ_MASK_CLR_L (REGISTERS_BASE + 0x0410)
79#define FIQ_MASK_CLR_H (REGISTERS_BASE + 0x0414)
80#define IRQ_MASK (REGISTERS_BASE + 0x0418)
81#define IRQ_MASK_L (REGISTERS_BASE + 0x0418)
82#define IRQ_MASK_H (REGISTERS_BASE + 0x041C)
83#define IRQ_MASK_SET (REGISTERS_BASE + 0x0420)
84#define IRQ_MASK_SET_L (REGISTERS_BASE + 0x0420)
85#define IRQ_MASK_SET_H (REGISTERS_BASE + 0x0424)
86#define IRQ_MASK_CLR (REGISTERS_BASE + 0x0428)
87#define IRQ_MASK_CLR_L (REGISTERS_BASE + 0x0428)
88#define IRQ_MASK_CLR_H (REGISTERS_BASE + 0x042C)
89#define ECPU_MASK (REGISTERS_BASE + 0x0448)
90#define FIQ_STS_L (REGISTERS_BASE + 0x044C)
91#define FIQ_STS_H (REGISTERS_BASE + 0x0450)
92#define IRQ_STS_L (REGISTERS_BASE + 0x0454)
93#define IRQ_STS_H (REGISTERS_BASE + 0x0458)
94#define INT_STS_ND (REGISTERS_BASE + 0x0464)
95#define INT_STS_RAW_L (REGISTERS_BASE + 0x0464)
96#define INT_STS_RAW_H (REGISTERS_BASE + 0x0468)
97#define INT_STS_CLR (REGISTERS_BASE + 0x04B4)
98#define INT_STS_CLR_L (REGISTERS_BASE + 0x04B4)
99#define INT_STS_CLR_H (REGISTERS_BASE + 0x04B8)
100#define INT_ACK (REGISTERS_BASE + 0x046C)
101#define INT_ACK_L (REGISTERS_BASE + 0x046C)
102#define INT_ACK_H (REGISTERS_BASE + 0x0470)
103#define INT_TRIG (REGISTERS_BASE + 0x0474)
104#define INT_TRIG_L (REGISTERS_BASE + 0x0474)
105#define INT_TRIG_H (REGISTERS_BASE + 0x0478)
106#define HOST_STS_L (REGISTERS_BASE + 0x045C)
107#define HOST_STS_H (REGISTERS_BASE + 0x0460)
108#define HOST_MASK (REGISTERS_BASE + 0x0430)
109#define HOST_MASK_L (REGISTERS_BASE + 0x0430)
110#define HOST_MASK_H (REGISTERS_BASE + 0x0434)
111#define HOST_MASK_SET (REGISTERS_BASE + 0x0438)
112#define HOST_MASK_SET_L (REGISTERS_BASE + 0x0438)
113#define HOST_MASK_SET_H (REGISTERS_BASE + 0x043C)
114#define HOST_MASK_CLR (REGISTERS_BASE + 0x0440)
115#define HOST_MASK_CLR_L (REGISTERS_BASE + 0x0440)
116#define HOST_MASK_CLR_H (REGISTERS_BASE + 0x0444)
117 65
118#define ACX_REG_INTERRUPT_TRIG (REGISTERS_BASE + 0x0474) 66#define ACX_REG_INTERRUPT_TRIG (REGISTERS_BASE + 0x0474)
119#define ACX_REG_INTERRUPT_TRIG_H (REGISTERS_BASE + 0x0478) 67#define ACX_REG_INTERRUPT_TRIG_H (REGISTERS_BASE + 0x0478)
120 68
121/* Host Interrupts*/
122#define HINT_MASK (REGISTERS_BASE + 0x0494)
123#define HINT_MASK_SET (REGISTERS_BASE + 0x0498)
124#define HINT_MASK_CLR (REGISTERS_BASE + 0x049C)
125#define HINT_STS_ND_MASKED (REGISTERS_BASE + 0x04A0)
126/*1150 spec calls this HINT_STS_RAW*/
127#define HINT_STS_ND (REGISTERS_BASE + 0x04B0)
128#define HINT_STS_CLR (REGISTERS_BASE + 0x04A4)
129#define HINT_ACK (REGISTERS_BASE + 0x04A8)
130#define HINT_TRIG (REGISTERS_BASE + 0x04AC)
131
132/*============================================= 69/*=============================================
133 Host Interrupt Mask Register - 32bit (RW) 70 Host Interrupt Mask Register - 32bit (RW)
134 ------------------------------------------ 71 ------------------------------------------
@@ -433,16 +370,6 @@
433 370
434 371
435/*=============================================== 372/*===============================================
436 Phy regs
437 ===============================================*/
438#define ACX_PHY_ADDR_REG SBB_ADDR
439#define ACX_PHY_DATA_REG SBB_DATA
440#define ACX_PHY_CTRL_REG SBB_CTL
441#define ACX_PHY_REG_WR_MASK 0x00000001ul
442#define ACX_PHY_REG_RD_MASK 0x00000002ul
443
444
445/*===============================================
446 EEPROM Read/Write Request 32bit RW 373 EEPROM Read/Write Request 32bit RW
447 ------------------------------------------ 374 ------------------------------------------
448 1 EE_READ - EEPROM Read Request 1 - Setting this bit 375 1 EE_READ - EEPROM Read Request 1 - Setting this bit
@@ -511,28 +438,6 @@
511#define ACX_CONT_WIND_MIN_MASK 0x0000007f 438#define ACX_CONT_WIND_MIN_MASK 0x0000007f
512#define ACX_CONT_WIND_MAX 0x03ff0000 439#define ACX_CONT_WIND_MAX 0x03ff0000
513 440
514/*
515 * Indirect slave register/memory registers
516 * ----------------------------------------
517 */
518#define HW_SLAVE_REG_ADDR_REG 0x00000004
519#define HW_SLAVE_REG_DATA_REG 0x00000008
520#define HW_SLAVE_REG_CTRL_REG 0x0000000c
521
522#define SLAVE_AUTO_INC 0x00010000
523#define SLAVE_NO_AUTO_INC 0x00000000
524#define SLAVE_HOST_LITTLE_ENDIAN 0x00000000
525
526#define HW_SLAVE_MEM_ADDR_REG SLV_MEM_ADDR
527#define HW_SLAVE_MEM_DATA_REG SLV_MEM_DATA
528#define HW_SLAVE_MEM_CTRL_REG SLV_MEM_CTL
529#define HW_SLAVE_MEM_ENDIAN_REG SLV_END_CTL
530
531#define HW_FUNC_EVENT_INT_EN 0x8000
532#define HW_FUNC_EVENT_MASK_REG 0x00000034
533
534#define ACX_MAC_TIMESTAMP_REG (MAC_TIMESTAMP)
535
536/*=============================================== 441/*===============================================
537 HI_CFG Interface Configuration Register Values 442 HI_CFG Interface Configuration Register Values
538 ------------------------------------------ 443 ------------------------------------------
@@ -647,10 +552,6 @@ b12-b0 - Supported Rate indicator bits as defined below.
647******************************************************************************/ 552******************************************************************************/
648 553
649 554
650#define TNETW1251_CHIP_ID_PG1_0 0x07010101
651#define TNETW1251_CHIP_ID_PG1_1 0x07020101
652#define TNETW1251_CHIP_ID_PG1_2 0x07030101
653
654/************************************************************************* 555/*************************************************************************
655 556
656 Interrupt Trigger Register (Host -> WiLink) 557 Interrupt Trigger Register (Host -> WiLink)
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/wl1271_rx.c
index ca645f38109b..6730f5b96e76 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.c
@@ -26,6 +26,7 @@
26#include "wl1271_reg.h" 26#include "wl1271_reg.h"
27#include "wl1271_rx.h" 27#include "wl1271_rx.h"
28#include "wl1271_spi.h" 28#include "wl1271_spi.h"
29#include "wl1271_io.h"
29 30
30static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status, 31static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status,
31 u32 drv_rx_counter) 32 u32 drv_rx_counter)
@@ -166,7 +167,7 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
166 } 167 }
167 168
168 buf = skb_put(skb, length); 169 buf = skb_put(skb, length);
169 wl1271_spi_read(wl, WL1271_SLV_MEM_DATA, buf, length, true); 170 wl1271_read(wl, WL1271_SLV_MEM_DATA, buf, length, true);
170 171
171 /* the data read starts with the descriptor */ 172 /* the data read starts with the descriptor */
172 desc = (struct wl1271_rx_descriptor *) buf; 173 desc = (struct wl1271_rx_descriptor *) buf;
@@ -210,15 +211,13 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
210 wl->rx_mem_pool_addr.addr + 4; 211 wl->rx_mem_pool_addr.addr + 4;
211 212
212 /* Choose the block we want to read */ 213 /* Choose the block we want to read */
213 wl1271_spi_write(wl, WL1271_SLV_REG_DATA, 214 wl1271_write(wl, WL1271_SLV_REG_DATA, &wl->rx_mem_pool_addr,
214 &wl->rx_mem_pool_addr, 215 sizeof(wl->rx_mem_pool_addr), false);
215 sizeof(wl->rx_mem_pool_addr), false);
216 216
217 wl1271_rx_handle_data(wl, buf_size); 217 wl1271_rx_handle_data(wl, buf_size);
218 218
219 wl->rx_counter++; 219 wl->rx_counter++;
220 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; 220 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
221 wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
221 } 222 }
222
223 wl1271_spi_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
224} 223}
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 02978a16e732..67a82934f36e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -30,28 +30,6 @@
30#include "wl12xx_80211.h" 30#include "wl12xx_80211.h"
31#include "wl1271_spi.h" 31#include "wl1271_spi.h"
32 32
33static int wl1271_translate_addr(struct wl1271 *wl, int addr)
34{
35 /*
36 * To translate, first check to which window of addresses the
37 * particular address belongs. Then subtract the starting address
38 * of that window from the address. Then, add offset of the
39 * translated region.
40 *
41 * The translated regions occur next to each other in physical device
42 * memory, so just add the sizes of the preceeding address regions to
43 * get the offset to the new region.
44 *
45 * Currently, only the two first regions are addressed, and the
46 * assumption is that all addresses will fall into either of those
47 * two.
48 */
49 if ((addr >= wl->part.reg.start) &&
50 (addr < wl->part.reg.start + wl->part.reg.size))
51 return addr - wl->part.reg.start + wl->part.mem.size;
52 else
53 return addr - wl->part.mem.start;
54}
55 33
56void wl1271_spi_reset(struct wl1271 *wl) 34void wl1271_spi_reset(struct wl1271 *wl)
57{ 35{
@@ -133,67 +111,6 @@ void wl1271_spi_init(struct wl1271 *wl)
133 wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN); 111 wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
134} 112}
135 113
136/* Set the SPI partitions to access the chip addresses
137 *
138 * To simplify driver code, a fixed (virtual) memory map is defined for
139 * register and memory addresses. Because in the chipset, in different stages
140 * of operation, those addresses will move around, an address translation
141 * mechanism is required.
142 *
143 * There are four partitions (three memory and one register partition),
144 * which are mapped to two different areas of the hardware memory.
145 *
146 * Virtual address
147 * space
148 *
149 * | |
150 * ...+----+--> mem.start
151 * Physical address ... | |
152 * space ... | | [PART_0]
153 * ... | |
154 * 00000000 <--+----+... ...+----+--> mem.start + mem.size
155 * | | ... | |
156 * |MEM | ... | |
157 * | | ... | |
158 * mem.size <--+----+... | | {unused area)
159 * | | ... | |
160 * |REG | ... | |
161 * mem.size | | ... | |
162 * + <--+----+... ...+----+--> reg.start
163 * reg.size | | ... | |
164 * |MEM2| ... | | [PART_1]
165 * | | ... | |
166 * ...+----+--> reg.start + reg.size
167 * | |
168 *
169 */
170int wl1271_set_partition(struct wl1271 *wl,
171 struct wl1271_partition_set *p)
172{
173 /* copy partition info */
174 memcpy(&wl->part, p, sizeof(*p));
175
176 wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
177 p->mem.start, p->mem.size);
178 wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
179 p->reg.start, p->reg.size);
180 wl1271_debug(DEBUG_SPI, "mem2_start %08X mem2_size %08X",
181 p->mem2.start, p->mem2.size);
182 wl1271_debug(DEBUG_SPI, "mem3_start %08X mem3_size %08X",
183 p->mem3.start, p->mem3.size);
184
185 /* write partition info to the chipset */
186 wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
187 wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
188 wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
189 wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
190 wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
191 wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
192 wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
193
194 return 0;
195}
196
197#define WL1271_BUSY_WORD_TIMEOUT 1000 114#define WL1271_BUSY_WORD_TIMEOUT 1000
198 115
199/* FIXME: Check busy words, removed due to SPI bug */ 116/* FIXME: Check busy words, removed due to SPI bug */
@@ -338,78 +255,3 @@ void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
338 wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd)); 255 wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd));
339 wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len); 256 wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len);
340} 257}
341
342void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, size_t len,
343 bool fixed)
344{
345 int physical;
346
347 physical = wl1271_translate_addr(wl, addr);
348
349 wl1271_spi_raw_read(wl, physical, buf, len, fixed);
350}
351
352void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, size_t len,
353 bool fixed)
354{
355 int physical;
356
357 physical = wl1271_translate_addr(wl, addr);
358
359 wl1271_spi_raw_write(wl, physical, buf, len, fixed);
360}
361
362u32 wl1271_spi_read32(struct wl1271 *wl, int addr)
363{
364 return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
365}
366
367void wl1271_spi_write32(struct wl1271 *wl, int addr, u32 val)
368{
369 wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
370}
371
372void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
373{
374 /* write address >> 1 + 0x30000 to OCP_POR_CTR */
375 addr = (addr >> 1) + 0x30000;
376 wl1271_spi_write32(wl, OCP_POR_CTR, addr);
377
378 /* write value to OCP_POR_WDATA */
379 wl1271_spi_write32(wl, OCP_DATA_WRITE, val);
380
381 /* write 1 to OCP_CMD */
382 wl1271_spi_write32(wl, OCP_CMD, OCP_CMD_WRITE);
383}
384
385u16 wl1271_top_reg_read(struct wl1271 *wl, int addr)
386{
387 u32 val;
388 int timeout = OCP_CMD_LOOP;
389
390 /* write address >> 1 + 0x30000 to OCP_POR_CTR */
391 addr = (addr >> 1) + 0x30000;
392 wl1271_spi_write32(wl, OCP_POR_CTR, addr);
393
394 /* write 2 to OCP_CMD */
395 wl1271_spi_write32(wl, OCP_CMD, OCP_CMD_READ);
396
397 /* poll for data ready */
398 do {
399 val = wl1271_spi_read32(wl, OCP_DATA_READ);
400 timeout--;
401 } while (!(val & OCP_READY_MASK) && timeout);
402
403 if (!timeout) {
404 wl1271_warning("Top register access timed out.");
405 return 0xffff;
406 }
407
408 /* check data status and return if OK */
409 if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK)
410 return val & 0xffff;
411 else {
412 wl1271_warning("Top register access returned error.");
413 return 0xffff;
414 }
415}
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.h b/drivers/net/wireless/wl12xx/wl1271_spi.h
index cb7df1c56314..a803596dad4a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.h
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.h
@@ -90,37 +90,7 @@ void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
90void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf, 90void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
91 size_t len, bool fixed); 91 size_t len, bool fixed);
92 92
93/* Translated target IO */
94void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, size_t len,
95 bool fixed);
96void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, size_t len,
97 bool fixed);
98u32 wl1271_spi_read32(struct wl1271 *wl, int addr);
99void wl1271_spi_write32(struct wl1271 *wl, int addr, u32 val);
100
101/* Top Register IO */
102void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
103u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
104
105/* INIT and RESET words */ 93/* INIT and RESET words */
106void wl1271_spi_reset(struct wl1271 *wl); 94void wl1271_spi_reset(struct wl1271 *wl);
107void wl1271_spi_init(struct wl1271 *wl); 95void wl1271_spi_init(struct wl1271 *wl);
108int wl1271_set_partition(struct wl1271 *wl,
109 struct wl1271_partition_set *p);
110
111static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
112{
113 wl1271_spi_raw_read(wl, addr, &wl->buffer_32,
114 sizeof(wl->buffer_32), false);
115
116 return wl->buffer_32;
117}
118
119static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val)
120{
121 wl->buffer_32 = val;
122 wl1271_spi_raw_write(wl, addr, &wl->buffer_32,
123 sizeof(wl->buffer_32), false);
124}
125
126#endif /* __WL1271_SPI_H__ */ 96#endif /* __WL1271_SPI_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.c b/drivers/net/wireless/wl12xx/wl1271_testmode.c
new file mode 100644
index 000000000000..3919102e942e
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_testmode.c
@@ -0,0 +1,283 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2010 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23#include "wl1271_testmode.h"
24
25#include <net/genetlink.h>
26
27#include "wl1271.h"
28#include "wl1271_spi.h"
29#include "wl1271_acx.h"
30
31#define WL1271_TM_MAX_DATA_LENGTH 1024
32
33enum wl1271_tm_commands {
34 WL1271_TM_CMD_UNSPEC,
35 WL1271_TM_CMD_TEST,
36 WL1271_TM_CMD_INTERROGATE,
37 WL1271_TM_CMD_CONFIGURE,
38 WL1271_TM_CMD_NVS_PUSH,
39 WL1271_TM_CMD_SET_PLT_MODE,
40
41 __WL1271_TM_CMD_AFTER_LAST
42};
43#define WL1271_TM_CMD_MAX (__WL1271_TM_CMD_AFTER_LAST - 1)
44
45enum wl1271_tm_attrs {
46 WL1271_TM_ATTR_UNSPEC,
47 WL1271_TM_ATTR_CMD_ID,
48 WL1271_TM_ATTR_ANSWER,
49 WL1271_TM_ATTR_DATA,
50 WL1271_TM_ATTR_IE_ID,
51 WL1271_TM_ATTR_PLT_MODE,
52
53 __WL1271_TM_ATTR_AFTER_LAST
54};
55#define WL1271_TM_ATTR_MAX (__WL1271_TM_ATTR_AFTER_LAST - 1)
56
57static struct nla_policy wl1271_tm_policy[WL1271_TM_ATTR_MAX + 1] = {
58 [WL1271_TM_ATTR_CMD_ID] = { .type = NLA_U32 },
59 [WL1271_TM_ATTR_ANSWER] = { .type = NLA_U8 },
60 [WL1271_TM_ATTR_DATA] = { .type = NLA_BINARY,
61 .len = WL1271_TM_MAX_DATA_LENGTH },
62 [WL1271_TM_ATTR_IE_ID] = { .type = NLA_U32 },
63 [WL1271_TM_ATTR_PLT_MODE] = { .type = NLA_U32 },
64};
65
66
67static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
68{
69 int buf_len, ret, len;
70 struct sk_buff *skb;
71 void *buf;
72 u8 answer = 0;
73
74 wl1271_debug(DEBUG_TESTMODE, "testmode cmd test");
75
76 if (!tb[WL1271_TM_ATTR_DATA])
77 return -EINVAL;
78
79 buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
80 buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]);
81
82 if (tb[WL1271_TM_ATTR_ANSWER])
83 answer = nla_get_u8(tb[WL1271_TM_ATTR_ANSWER]);
84
85 if (buf_len > sizeof(struct wl1271_command))
86 return -EMSGSIZE;
87
88 mutex_lock(&wl->mutex);
89 ret = wl1271_cmd_test(wl, buf, buf_len, answer);
90 mutex_unlock(&wl->mutex);
91
92 if (ret < 0) {
93 wl1271_warning("testmode cmd test failed: %d", ret);
94 return ret;
95 }
96
97 if (answer) {
98 len = nla_total_size(buf_len);
99 skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
100 if (!skb)
101 return -ENOMEM;
102
103 NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf);
104 ret = cfg80211_testmode_reply(skb);
105 if (ret < 0)
106 return ret;
107 }
108
109 return 0;
110
111nla_put_failure:
112 kfree_skb(skb);
113 return -EMSGSIZE;
114}
115
116static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
117{
118 int ret;
119 struct wl1271_command *cmd;
120 struct sk_buff *skb;
121 u8 ie_id;
122
123 wl1271_debug(DEBUG_TESTMODE, "testmode cmd interrogate");
124
125 if (!tb[WL1271_TM_ATTR_IE_ID])
126 return -EINVAL;
127
128 ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);
129
130 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
131 if (!cmd)
132 return -ENOMEM;
133
134 mutex_lock(&wl->mutex);
135 ret = wl1271_cmd_interrogate(wl, ie_id, cmd, sizeof(*cmd));
136 mutex_unlock(&wl->mutex);
137
138 if (ret < 0) {
139 wl1271_warning("testmode cmd interrogate failed: %d", ret);
140 return ret;
141 }
142
143 skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd));
144 if (!skb)
145 return -ENOMEM;
146
147 NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd);
148
149 return 0;
150
151nla_put_failure:
152 kfree_skb(skb);
153 return -EMSGSIZE;
154}
155
156static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
157{
158 int buf_len, ret;
159 void *buf;
160 u8 ie_id;
161
162 wl1271_debug(DEBUG_TESTMODE, "testmode cmd configure");
163
164 if (!tb[WL1271_TM_ATTR_DATA])
165 return -EINVAL;
166 if (!tb[WL1271_TM_ATTR_IE_ID])
167 return -EINVAL;
168
169 ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);
170 buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
171 buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]);
172
173 if (buf_len > sizeof(struct wl1271_command))
174 return -EMSGSIZE;
175
176 mutex_lock(&wl->mutex);
177 ret = wl1271_cmd_configure(wl, ie_id, buf, buf_len);
178 mutex_unlock(&wl->mutex);
179
180 if (ret < 0) {
181 wl1271_warning("testmode cmd configure failed: %d", ret);
182 return ret;
183 }
184
185 return 0;
186}
187
188static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
189{
190 int ret = 0;
191 size_t len;
192 void *buf;
193
194 wl1271_debug(DEBUG_TESTMODE, "testmode cmd nvs push");
195
196 if (!tb[WL1271_TM_ATTR_DATA])
197 return -EINVAL;
198
199 buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
200 len = nla_len(tb[WL1271_TM_ATTR_DATA]);
201
202 if (len != sizeof(struct wl1271_nvs_file)) {
203 wl1271_error("nvs size is not as expected: %zu != %zu",
204 len, sizeof(struct wl1271_nvs_file));
205 return -EMSGSIZE;
206 }
207
208 mutex_lock(&wl->mutex);
209
210 kfree(wl->nvs);
211
212 wl->nvs = kmalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL);
213 if (!wl->nvs) {
214 wl1271_error("could not allocate memory for the nvs file");
215 ret = -ENOMEM;
216 goto out;
217 }
218
219 memcpy(wl->nvs, buf, len);
220
221 wl1271_debug(DEBUG_TESTMODE, "testmode pushed nvs");
222
223out:
224 mutex_unlock(&wl->mutex);
225
226 return ret;
227}
228
229static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
230{
231 u32 val;
232 int ret;
233
234 wl1271_debug(DEBUG_TESTMODE, "testmode cmd set plt mode");
235
236 if (!tb[WL1271_TM_ATTR_PLT_MODE])
237 return -EINVAL;
238
239 val = nla_get_u32(tb[WL1271_TM_ATTR_PLT_MODE]);
240
241 switch (val) {
242 case 0:
243 ret = wl1271_plt_stop(wl);
244 break;
245 case 1:
246 ret = wl1271_plt_start(wl);
247 break;
248 default:
249 ret = -EINVAL;
250 break;
251 }
252
253 return ret;
254}
255
256int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
257{
258 struct wl1271 *wl = hw->priv;
259 struct nlattr *tb[WL1271_TM_ATTR_MAX + 1];
260 int err;
261
262 err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy);
263 if (err)
264 return err;
265
266 if (!tb[WL1271_TM_ATTR_CMD_ID])
267 return -EINVAL;
268
269 switch (nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID])) {
270 case WL1271_TM_CMD_TEST:
271 return wl1271_tm_cmd_test(wl, tb);
272 case WL1271_TM_CMD_INTERROGATE:
273 return wl1271_tm_cmd_interrogate(wl, tb);
274 case WL1271_TM_CMD_CONFIGURE:
275 return wl1271_tm_cmd_configure(wl, tb);
276 case WL1271_TM_CMD_NVS_PUSH:
277 return wl1271_tm_cmd_nvs_push(wl, tb);
278 case WL1271_TM_CMD_SET_PLT_MODE:
279 return wl1271_tm_cmd_set_plt_mode(wl, tb);
280 default:
281 return -EOPNOTSUPP;
282 }
283}
diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.h b/drivers/net/wireless/wl12xx/wl1271_testmode.h
new file mode 100644
index 000000000000..c196d28f9d9d
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_testmode.h
@@ -0,0 +1,31 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2010 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#ifndef __WL1271_TESTMODE_H__
25#define __WL1271_TESTMODE_H__
26
27#include <net/mac80211.h>
28
29int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len);
30
31#endif /* __WL1271_TESTMODE_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index 00af065c77c2..811e739d05bf 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -26,6 +26,7 @@
26 26
27#include "wl1271.h" 27#include "wl1271.h"
28#include "wl1271_spi.h" 28#include "wl1271_spi.h"
29#include "wl1271_io.h"
29#include "wl1271_reg.h" 30#include "wl1271_reg.h"
30#include "wl1271_ps.h" 31#include "wl1271_ps.h"
31#include "wl1271_tx.h" 32#include "wl1271_tx.h"
@@ -87,7 +88,7 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
87 u32 extra, struct ieee80211_tx_info *control) 88 u32 extra, struct ieee80211_tx_info *control)
88{ 89{
89 struct wl1271_tx_hw_descr *desc; 90 struct wl1271_tx_hw_descr *desc;
90 int pad; 91 int pad, ac;
91 u16 tx_attr; 92 u16 tx_attr;
92 93
93 desc = (struct wl1271_tx_hw_descr *) skb->data; 94 desc = (struct wl1271_tx_hw_descr *) skb->data;
@@ -107,9 +108,11 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
107 108
108 /* configure the tx attributes */ 109 /* configure the tx attributes */
109 tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER; 110 tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
110 /* FIXME: do we know the packet priority? can we identify mgmt 111
111 packets, and use max prio for them at least? */ 112 /* queue */
112 desc->tid = 0; 113 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
114 desc->tid = wl1271_tx_ac_to_tid(ac);
115
113 desc->aid = TX_HW_DEFAULT_AID; 116 desc->aid = TX_HW_DEFAULT_AID;
114 desc->reserved = 0; 117 desc->reserved = 0;
115 118
@@ -121,6 +124,11 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
121 pad = pad - skb->len; 124 pad = pad - skb->len;
122 tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD; 125 tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
123 126
127 /* if the packets are destined for AP (have a STA entry) send them
128 with AP rate policies, otherwise use default basic rates */
129 if (control->control.sta)
130 tx_attr |= ACX_TX_AP_FULL_RATE << TX_HW_ATTR_OFST_RATE_POLICY;
131
124 desc->tx_attr = cpu_to_le16(tx_attr); 132 desc->tx_attr = cpu_to_le16(tx_attr);
125 133
126 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad); 134 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
@@ -158,11 +166,11 @@ static int wl1271_tx_send_packet(struct wl1271 *wl, struct sk_buff *skb,
158 len = WL1271_TX_ALIGN(skb->len); 166 len = WL1271_TX_ALIGN(skb->len);
159 167
160 /* perform a fixed address block write with the packet */ 168 /* perform a fixed address block write with the packet */
161 wl1271_spi_write(wl, WL1271_SLV_MEM_DATA, skb->data, len, true); 169 wl1271_write(wl, WL1271_SLV_MEM_DATA, skb->data, len, true);
162 170
163 /* write packet new counter into the write access register */ 171 /* write packet new counter into the write access register */
164 wl->tx_packets_count++; 172 wl->tx_packets_count++;
165 wl1271_spi_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count); 173 wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
166 174
167 desc = (struct wl1271_tx_hw_descr *) skb->data; 175 desc = (struct wl1271_tx_hw_descr *) skb->data;
168 wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)", 176 wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)",
@@ -196,6 +204,7 @@ static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb)
196 ret = wl1271_cmd_set_default_wep_key(wl, idx); 204 ret = wl1271_cmd_set_default_wep_key(wl, idx);
197 if (ret < 0) 205 if (ret < 0)
198 return ret; 206 return ret;
207 wl->default_key = idx;
199 } 208 }
200 } 209 }
201 210
@@ -214,18 +223,50 @@ static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb)
214 return ret; 223 return ret;
215} 224}
216 225
226static u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
227{
228 struct ieee80211_supported_band *band;
229 u32 enabled_rates = 0;
230 int bit;
231
232 band = wl->hw->wiphy->bands[wl->band];
233 for (bit = 0; bit < band->n_bitrates; bit++) {
234 if (rate_set & 0x1)
235 enabled_rates |= band->bitrates[bit].hw_value;
236 rate_set >>= 1;
237 }
238
239 return enabled_rates;
240}
241
217void wl1271_tx_work(struct work_struct *work) 242void wl1271_tx_work(struct work_struct *work)
218{ 243{
219 struct wl1271 *wl = container_of(work, struct wl1271, tx_work); 244 struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
220 struct sk_buff *skb; 245 struct sk_buff *skb;
221 bool woken_up = false; 246 bool woken_up = false;
247 u32 sta_rates = 0;
222 int ret; 248 int ret;
223 249
250 /* check if the rates supported by the AP have changed */
251 if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED,
252 &wl->flags))) {
253 unsigned long flags;
254 spin_lock_irqsave(&wl->wl_lock, flags);
255 sta_rates = wl->sta_rate_set;
256 spin_unlock_irqrestore(&wl->wl_lock, flags);
257 }
258
224 mutex_lock(&wl->mutex); 259 mutex_lock(&wl->mutex);
225 260
226 if (unlikely(wl->state == WL1271_STATE_OFF)) 261 if (unlikely(wl->state == WL1271_STATE_OFF))
227 goto out; 262 goto out;
228 263
264 /* if rates have changed, re-configure the rate policy */
265 if (unlikely(sta_rates)) {
266 wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
267 wl1271_acx_rate_policies(wl);
268 }
269
229 while ((skb = skb_dequeue(&wl->tx_queue))) { 270 while ((skb = skb_dequeue(&wl->tx_queue))) {
230 if (!woken_up) { 271 if (!woken_up) {
231 ret = wl1271_ps_elp_wakeup(wl, false); 272 ret = wl1271_ps_elp_wakeup(wl, false);
@@ -240,18 +281,18 @@ void wl1271_tx_work(struct work_struct *work)
240 wl1271_debug(DEBUG_TX, "tx_work: fw buffer full, " 281 wl1271_debug(DEBUG_TX, "tx_work: fw buffer full, "
241 "stop queues"); 282 "stop queues");
242 ieee80211_stop_queues(wl->hw); 283 ieee80211_stop_queues(wl->hw);
243 wl->tx_queue_stopped = true; 284 set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
244 skb_queue_head(&wl->tx_queue, skb); 285 skb_queue_head(&wl->tx_queue, skb);
245 goto out; 286 goto out;
246 } else if (ret < 0) { 287 } else if (ret < 0) {
247 dev_kfree_skb(skb); 288 dev_kfree_skb(skb);
248 goto out; 289 goto out;
249 } else if (wl->tx_queue_stopped) { 290 } else if (test_and_clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED,
291 &wl->flags)) {
250 /* firmware buffer has space, restart queues */ 292 /* firmware buffer has space, restart queues */
251 wl1271_debug(DEBUG_TX, 293 wl1271_debug(DEBUG_TX,
252 "complete_packet: waking queues"); 294 "complete_packet: waking queues");
253 ieee80211_wake_queues(wl->hw); 295 ieee80211_wake_queues(wl->hw);
254 wl->tx_queue_stopped = false;
255 } 296 }
256 } 297 }
257 298
@@ -335,8 +376,8 @@ void wl1271_tx_complete(struct wl1271 *wl, u32 count)
335 wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count); 376 wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
336 377
337 /* read the tx results from the chipset */ 378 /* read the tx results from the chipset */
338 wl1271_spi_read(wl, le32_to_cpu(memmap->tx_result), 379 wl1271_read(wl, le32_to_cpu(memmap->tx_result),
339 wl->tx_res_if, sizeof(*wl->tx_res_if), false); 380 wl->tx_res_if, sizeof(*wl->tx_res_if), false);
340 381
341 /* verify that the result buffer is not getting overrun */ 382 /* verify that the result buffer is not getting overrun */
342 if (count > TX_HW_RESULT_QUEUE_LEN) { 383 if (count > TX_HW_RESULT_QUEUE_LEN) {
@@ -357,10 +398,10 @@ void wl1271_tx_complete(struct wl1271 *wl, u32 count)
357 } 398 }
358 399
359 /* write host counter to chipset (to ack) */ 400 /* write host counter to chipset (to ack) */
360 wl1271_spi_write32(wl, le32_to_cpu(memmap->tx_result) + 401 wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
361 offsetof(struct wl1271_tx_hw_res_if, 402 offsetof(struct wl1271_tx_hw_res_if,
362 tx_result_host_counter), 403 tx_result_host_counter),
363 le32_to_cpu(wl->tx_res_if->tx_result_fw_counter)); 404 le32_to_cpu(wl->tx_res_if->tx_result_fw_counter));
364} 405}
365 406
366/* caller must hold wl->mutex */ 407/* caller must hold wl->mutex */
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/wl1271_tx.h
index 416396caf0a0..17e405a09caa 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.h
@@ -123,6 +123,42 @@ struct wl1271_tx_hw_res_if {
123 struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN]; 123 struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN];
124} __attribute__ ((packed)); 124} __attribute__ ((packed));
125 125
126static inline int wl1271_tx_get_queue(int queue)
127{
128 /* FIXME: use best effort until WMM is enabled */
129 return CONF_TX_AC_BE;
130
131 switch (queue) {
132 case 0:
133 return CONF_TX_AC_VO;
134 case 1:
135 return CONF_TX_AC_VI;
136 case 2:
137 return CONF_TX_AC_BE;
138 case 3:
139 return CONF_TX_AC_BK;
140 default:
141 return CONF_TX_AC_BE;
142 }
143}
144
145/* wl1271 tx descriptor needs the tid and we need to convert it from ac */
146static inline int wl1271_tx_ac_to_tid(int ac)
147{
148 switch (ac) {
149 case 0:
150 return 0;
151 case 1:
152 return 2;
153 case 2:
154 return 4;
155 case 3:
156 return 6;
157 default:
158 return 0;
159 }
160}
161
126void wl1271_tx_work(struct work_struct *work); 162void wl1271_tx_work(struct work_struct *work);
127void wl1271_tx_complete(struct wl1271 *wl, u32 count); 163void wl1271_tx_complete(struct wl1271 *wl, u32 count);
128void wl1271_tx_flush(struct wl1271 *wl); 164void wl1271_tx_flush(struct wl1271 *wl);
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 33c8be7ec8e6..6917286edcae 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -875,20 +875,18 @@ static struct iw_statistics *zd1201_get_wireless_stats(struct net_device *dev)
875static void zd1201_set_multicast(struct net_device *dev) 875static void zd1201_set_multicast(struct net_device *dev)
876{ 876{
877 struct zd1201 *zd = netdev_priv(dev); 877 struct zd1201 *zd = netdev_priv(dev);
878 struct dev_mc_list *mc = dev->mc_list; 878 struct dev_mc_list *mc;
879 unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI]; 879 unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI];
880 int i; 880 int i;
881 881
882 if (dev->mc_count > ZD1201_MAXMULTI) 882 if (netdev_mc_count(dev) > ZD1201_MAXMULTI)
883 return; 883 return;
884 884
885 for (i=0; i<dev->mc_count; i++) { 885 i = 0;
886 memcpy(reqbuf+i*ETH_ALEN, mc->dmi_addr, ETH_ALEN); 886 netdev_for_each_mc_addr(mc, dev)
887 mc = mc->next; 887 memcpy(reqbuf + i++ * ETH_ALEN, mc->dmi_addr, ETH_ALEN);
888 }
889 zd1201_setconfig(zd, ZD1201_RID_CNFGROUPADDRESS, reqbuf, 888 zd1201_setconfig(zd, ZD1201_RID_CNFGROUPADDRESS, reqbuf,
890 dev->mc_count*ETH_ALEN, 0); 889 netdev_mc_count(dev) * ETH_ALEN, 0);
891
892} 890}
893 891
894static int zd1201_config_commit(struct net_device *dev, 892static int zd1201_config_commit(struct net_device *dev,
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index f14deb0c8514..2d555cc30508 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -869,7 +869,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
869} 869}
870 870
871static int zd_op_add_interface(struct ieee80211_hw *hw, 871static int zd_op_add_interface(struct ieee80211_hw *hw,
872 struct ieee80211_if_init_conf *conf) 872 struct ieee80211_vif *vif)
873{ 873{
874 struct zd_mac *mac = zd_hw_mac(hw); 874 struct zd_mac *mac = zd_hw_mac(hw);
875 875
@@ -877,22 +877,22 @@ static int zd_op_add_interface(struct ieee80211_hw *hw,
877 if (mac->type != NL80211_IFTYPE_UNSPECIFIED) 877 if (mac->type != NL80211_IFTYPE_UNSPECIFIED)
878 return -EOPNOTSUPP; 878 return -EOPNOTSUPP;
879 879
880 switch (conf->type) { 880 switch (vif->type) {
881 case NL80211_IFTYPE_MONITOR: 881 case NL80211_IFTYPE_MONITOR:
882 case NL80211_IFTYPE_MESH_POINT: 882 case NL80211_IFTYPE_MESH_POINT:
883 case NL80211_IFTYPE_STATION: 883 case NL80211_IFTYPE_STATION:
884 case NL80211_IFTYPE_ADHOC: 884 case NL80211_IFTYPE_ADHOC:
885 mac->type = conf->type; 885 mac->type = vif->type;
886 break; 886 break;
887 default: 887 default:
888 return -EOPNOTSUPP; 888 return -EOPNOTSUPP;
889 } 889 }
890 890
891 return zd_write_mac_addr(&mac->chip, conf->mac_addr); 891 return zd_write_mac_addr(&mac->chip, vif->addr);
892} 892}
893 893
894static void zd_op_remove_interface(struct ieee80211_hw *hw, 894static void zd_op_remove_interface(struct ieee80211_hw *hw,
895 struct ieee80211_if_init_conf *conf) 895 struct ieee80211_vif *vif)
896{ 896{
897 struct zd_mac *mac = zd_hw_mac(hw); 897 struct zd_mac *mac = zd_hw_mac(hw);
898 mac->type = NL80211_IFTYPE_UNSPECIFIED; 898 mac->type = NL80211_IFTYPE_UNSPECIFIED;
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 72d3e437e190..442fc1117326 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1079,11 +1079,15 @@ static int eject_installer(struct usb_interface *intf)
1079 int r; 1079 int r;
1080 1080
1081 /* Find bulk out endpoint */ 1081 /* Find bulk out endpoint */
1082 endpoint = &iface_desc->endpoint[1].desc; 1082 for (r = 1; r >= 0; r--) {
1083 if (usb_endpoint_dir_out(endpoint) && 1083 endpoint = &iface_desc->endpoint[r].desc;
1084 usb_endpoint_xfer_bulk(endpoint)) { 1084 if (usb_endpoint_dir_out(endpoint) &&
1085 bulk_out_ep = endpoint->bEndpointAddress; 1085 usb_endpoint_xfer_bulk(endpoint)) {
1086 } else { 1086 bulk_out_ep = endpoint->bEndpointAddress;
1087 break;
1088 }
1089 }
1090 if (r == -1) {
1087 dev_err(&udev->dev, 1091 dev_err(&udev->dev,
1088 "zd1211rw: Could not find bulk out endpoint\n"); 1092 "zd1211rw: Could not find bulk out endpoint\n");
1089 return -ENODEV; 1093 return -ENODEV;
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 8c777ba4e2b3..1a74594224b1 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -22,11 +22,17 @@
22 22
23#include <linux/of_device.h> 23#include <linux/of_device.h>
24#include <linux/of_platform.h> 24#include <linux/of_platform.h>
25#include <linux/of_mdio.h>
26#include <linux/phy.h>
25 27
26#define DRIVER_NAME "xilinx_emaclite" 28#define DRIVER_NAME "xilinx_emaclite"
27 29
28/* Register offsets for the EmacLite Core */ 30/* Register offsets for the EmacLite Core */
29#define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */ 31#define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */
32#define XEL_MDIOADDR_OFFSET 0x07E4 /* MDIO Address Register */
33#define XEL_MDIOWR_OFFSET 0x07E8 /* MDIO Write Data Register */
34#define XEL_MDIORD_OFFSET 0x07EC /* MDIO Read Data Register */
35#define XEL_MDIOCTRL_OFFSET 0x07F0 /* MDIO Control Register */
30#define XEL_GIER_OFFSET 0x07F8 /* GIE Register */ 36#define XEL_GIER_OFFSET 0x07F8 /* GIE Register */
31#define XEL_TSR_OFFSET 0x07FC /* Tx status */ 37#define XEL_TSR_OFFSET 0x07FC /* Tx status */
32#define XEL_TPLR_OFFSET 0x07F4 /* Tx packet length */ 38#define XEL_TPLR_OFFSET 0x07F4 /* Tx packet length */
@@ -37,6 +43,22 @@
37 43
38#define XEL_BUFFER_OFFSET 0x0800 /* Next Tx/Rx buffer's offset */ 44#define XEL_BUFFER_OFFSET 0x0800 /* Next Tx/Rx buffer's offset */
39 45
46/* MDIO Address Register Bit Masks */
47#define XEL_MDIOADDR_REGADR_MASK 0x0000001F /* Register Address */
48#define XEL_MDIOADDR_PHYADR_MASK 0x000003E0 /* PHY Address */
49#define XEL_MDIOADDR_PHYADR_SHIFT 5
50#define XEL_MDIOADDR_OP_MASK 0x00000400 /* RD/WR Operation */
51
52/* MDIO Write Data Register Bit Masks */
53#define XEL_MDIOWR_WRDATA_MASK 0x0000FFFF /* Data to be Written */
54
55/* MDIO Read Data Register Bit Masks */
56#define XEL_MDIORD_RDDATA_MASK 0x0000FFFF /* Data to be Read */
57
58/* MDIO Control Register Bit Masks */
59#define XEL_MDIOCTRL_MDIOSTS_MASK 0x00000001 /* MDIO Status Mask */
60#define XEL_MDIOCTRL_MDIOEN_MASK 0x00000008 /* MDIO Enable */
61
40/* Global Interrupt Enable Register (GIER) Bit Masks */ 62/* Global Interrupt Enable Register (GIER) Bit Masks */
41#define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */ 63#define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */
42 64
@@ -87,6 +109,12 @@
87 * @reset_lock: lock used for synchronization 109 * @reset_lock: lock used for synchronization
88 * @deferred_skb: holds an skb (for transmission at a later time) when the 110 * @deferred_skb: holds an skb (for transmission at a later time) when the
89 * Tx buffer is not free 111 * Tx buffer is not free
112 * @phy_dev: pointer to the PHY device
113 * @phy_node: pointer to the PHY device node
114 * @mii_bus: pointer to the MII bus
115 * @mdio_irqs: IRQs table for MDIO bus
116 * @last_link: last link status
117 * @has_mdio: indicates whether MDIO is included in the HW
90 */ 118 */
91struct net_local { 119struct net_local {
92 120
@@ -100,6 +128,15 @@ struct net_local {
100 128
101 spinlock_t reset_lock; 129 spinlock_t reset_lock;
102 struct sk_buff *deferred_skb; 130 struct sk_buff *deferred_skb;
131
132 struct phy_device *phy_dev;
133 struct device_node *phy_node;
134
135 struct mii_bus *mii_bus;
136 int mdio_irqs[PHY_MAX_ADDR];
137
138 int last_link;
139 bool has_mdio;
103}; 140};
104 141
105 142
@@ -431,7 +468,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
431} 468}
432 469
433/** 470/**
434 * xemaclite_set_mac_address - Set the MAC address for this device 471 * xemaclite_update_address - Update the MAC address in the device
435 * @drvdata: Pointer to the Emaclite device private data 472 * @drvdata: Pointer to the Emaclite device private data
436 * @address_ptr:Pointer to the MAC address (MAC address is a 48-bit value) 473 * @address_ptr:Pointer to the MAC address (MAC address is a 48-bit value)
437 * 474 *
@@ -441,8 +478,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
441 * The MAC address can be programmed using any of the two transmit 478 * The MAC address can be programmed using any of the two transmit
442 * buffers (if configured). 479 * buffers (if configured).
443 */ 480 */
444static void xemaclite_set_mac_address(struct net_local *drvdata, 481static void xemaclite_update_address(struct net_local *drvdata,
445 u8 *address_ptr) 482 u8 *address_ptr)
446{ 483{
447 void __iomem *addr; 484 void __iomem *addr;
448 u32 reg_data; 485 u32 reg_data;
@@ -465,6 +502,30 @@ static void xemaclite_set_mac_address(struct net_local *drvdata,
465} 502}
466 503
467/** 504/**
505 * xemaclite_set_mac_address - Set the MAC address for this device
506 * @dev: Pointer to the network device instance
507 * @addr: Void pointer to the sockaddr structure
508 *
509 * This function copies the HW address from the sockaddr strucutre to the
510 * net_device structure and updates the address in HW.
511 *
512 * Return: Error if the net device is busy or 0 if the addr is set
513 * successfully
514 */
515static int xemaclite_set_mac_address(struct net_device *dev, void *address)
516{
517 struct net_local *lp = (struct net_local *) netdev_priv(dev);
518 struct sockaddr *addr = address;
519
520 if (netif_running(dev))
521 return -EBUSY;
522
523 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
524 xemaclite_update_address(lp, dev->dev_addr);
525 return 0;
526}
527
528/**
468 * xemaclite_tx_timeout - Callback for Tx Timeout 529 * xemaclite_tx_timeout - Callback for Tx Timeout
469 * @dev: Pointer to the network device 530 * @dev: Pointer to the network device
470 * 531 *
@@ -641,12 +702,219 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
641 return IRQ_HANDLED; 702 return IRQ_HANDLED;
642} 703}
643 704
705/**********************/
706/* MDIO Bus functions */
707/**********************/
708
709/**
710 * xemaclite_mdio_wait - Wait for the MDIO to be ready to use
711 * @lp: Pointer to the Emaclite device private data
712 *
713 * This function waits till the device is ready to accept a new MDIO
714 * request.
715 *
716 * Return: 0 for success or ETIMEDOUT for a timeout
717 */
718
719static int xemaclite_mdio_wait(struct net_local *lp)
720{
721 long end = jiffies + 2;
722
723 /* wait for the MDIO interface to not be busy or timeout
724 after some time.
725 */
726 while (in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
727 XEL_MDIOCTRL_MDIOSTS_MASK) {
728 if (end - jiffies <= 0) {
729 WARN_ON(1);
730 return -ETIMEDOUT;
731 }
732 msleep(1);
733 }
734 return 0;
735}
736
737/**
738 * xemaclite_mdio_read - Read from a given MII management register
739 * @bus: the mii_bus struct
740 * @phy_id: the phy address
741 * @reg: register number to read from
742 *
743 * This function waits till the device is ready to accept a new MDIO
744 * request and then writes the phy address to the MDIO Address register
745 * and reads data from MDIO Read Data register, when its available.
746 *
747 * Return: Value read from the MII management register
748 */
749static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
750{
751 struct net_local *lp = bus->priv;
752 u32 ctrl_reg;
753 u32 rc;
754
755 if (xemaclite_mdio_wait(lp))
756 return -ETIMEDOUT;
757
758 /* Write the PHY address, register number and set the OP bit in the
759 * MDIO Address register. Set the Status bit in the MDIO Control
760 * register to start a MDIO read transaction.
761 */
762 ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET);
763 out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET,
764 XEL_MDIOADDR_OP_MASK |
765 ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg));
766 out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
767 ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK);
768
769 if (xemaclite_mdio_wait(lp))
770 return -ETIMEDOUT;
771
772 rc = in_be32(lp->base_addr + XEL_MDIORD_OFFSET);
773
774 dev_dbg(&lp->ndev->dev,
775 "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
776 phy_id, reg, rc);
777
778 return rc;
779}
780
781/**
782 * xemaclite_mdio_write - Write to a given MII management register
783 * @bus: the mii_bus struct
784 * @phy_id: the phy address
785 * @reg: register number to write to
786 * @val: value to write to the register number specified by reg
787 *
788 * This fucntion waits till the device is ready to accept a new MDIO
789 * request and then writes the val to the MDIO Write Data register.
790 */
791static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
792 u16 val)
793{
794 struct net_local *lp = bus->priv;
795 u32 ctrl_reg;
796
797 dev_dbg(&lp->ndev->dev,
798 "xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
799 phy_id, reg, val);
800
801 if (xemaclite_mdio_wait(lp))
802 return -ETIMEDOUT;
803
804 /* Write the PHY address, register number and clear the OP bit in the
805 * MDIO Address register and then write the value into the MDIO Write
806 * Data register. Finally, set the Status bit in the MDIO Control
807 * register to start a MDIO write transaction.
808 */
809 ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET);
810 out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET,
811 ~XEL_MDIOADDR_OP_MASK &
812 ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg));
813 out_be32(lp->base_addr + XEL_MDIOWR_OFFSET, val);
814 out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
815 ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK);
816
817 return 0;
818}
819
820/**
821 * xemaclite_mdio_reset - Reset the mdio bus.
822 * @bus: Pointer to the MII bus
823 *
824 * This function is required(?) as per Documentation/networking/phy.txt.
825 * There is no reset in this device; this function always returns 0.
826 */
827static int xemaclite_mdio_reset(struct mii_bus *bus)
828{
829 return 0;
830}
831
832/**
833 * xemaclite_mdio_setup - Register mii_bus for the Emaclite device
834 * @lp: Pointer to the Emaclite device private data
835 * @ofdev: Pointer to OF device structure
836 *
837 * This function enables MDIO bus in the Emaclite device and registers a
838 * mii_bus.
839 *
840 * Return: 0 upon success or a negative error upon failure
841 */
842static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
843{
844 struct mii_bus *bus;
845 int rc;
846 struct resource res;
847 struct device_node *np = of_get_parent(lp->phy_node);
848
849 /* Don't register the MDIO bus if the phy_node or its parent node
850 * can't be found.
851 */
852 if (!np)
853 return -ENODEV;
854
855 /* Enable the MDIO bus by asserting the enable bit in MDIO Control
856 * register.
857 */
858 out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
859 XEL_MDIOCTRL_MDIOEN_MASK);
860
861 bus = mdiobus_alloc();
862 if (!bus)
863 return -ENOMEM;
864
865 of_address_to_resource(np, 0, &res);
866 snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
867 (unsigned long long)res.start);
868 bus->priv = lp;
869 bus->name = "Xilinx Emaclite MDIO";
870 bus->read = xemaclite_mdio_read;
871 bus->write = xemaclite_mdio_write;
872 bus->reset = xemaclite_mdio_reset;
873 bus->parent = dev;
874 bus->irq = lp->mdio_irqs; /* preallocated IRQ table */
875
876 lp->mii_bus = bus;
877
878 rc = of_mdiobus_register(bus, np);
879 if (rc)
880 goto err_register;
881
882 return 0;
883
884err_register:
885 mdiobus_free(bus);
886 return rc;
887}
888
889/**
890 * xemaclite_adjust_link - Link state callback for the Emaclite device
891 * @ndev: pointer to net_device struct
892 *
893 * There's nothing in the Emaclite device to be configured when the link
894 * state changes. We just print the status.
895 */
896void xemaclite_adjust_link(struct net_device *ndev)
897{
898 struct net_local *lp = netdev_priv(ndev);
899 struct phy_device *phy = lp->phy_dev;
900 int link_state;
901
902 /* hash together the state values to decide if something has changed */
903 link_state = phy->speed | (phy->duplex << 1) | phy->link;
904
905 if (lp->last_link != link_state) {
906 lp->last_link = link_state;
907 phy_print_status(phy);
908 }
909}
910
644/** 911/**
645 * xemaclite_open - Open the network device 912 * xemaclite_open - Open the network device
646 * @dev: Pointer to the network device 913 * @dev: Pointer to the network device
647 * 914 *
648 * This function sets the MAC address, requests an IRQ and enables interrupts 915 * This function sets the MAC address, requests an IRQ and enables interrupts
649 * for the Emaclite device and starts the Tx queue. 916 * for the Emaclite device and starts the Tx queue.
917 * It also connects to the phy device, if MDIO is included in Emaclite device.
650 */ 918 */
651static int xemaclite_open(struct net_device *dev) 919static int xemaclite_open(struct net_device *dev)
652{ 920{
@@ -656,14 +924,47 @@ static int xemaclite_open(struct net_device *dev)
656 /* Just to be safe, stop the device first */ 924 /* Just to be safe, stop the device first */
657 xemaclite_disable_interrupts(lp); 925 xemaclite_disable_interrupts(lp);
658 926
927 if (lp->phy_node) {
928 u32 bmcr;
929
930 lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
931 xemaclite_adjust_link, 0,
932 PHY_INTERFACE_MODE_MII);
933 if (!lp->phy_dev) {
934 dev_err(&lp->ndev->dev, "of_phy_connect() failed\n");
935 return -ENODEV;
936 }
937
938 /* EmacLite doesn't support giga-bit speeds */
939 lp->phy_dev->supported &= (PHY_BASIC_FEATURES);
940 lp->phy_dev->advertising = lp->phy_dev->supported;
941
942 /* Don't advertise 1000BASE-T Full/Half duplex speeds */
943 phy_write(lp->phy_dev, MII_CTRL1000, 0);
944
945 /* Advertise only 10 and 100mbps full/half duplex speeds */
946 phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL);
947
948 /* Restart auto negotiation */
949 bmcr = phy_read(lp->phy_dev, MII_BMCR);
950 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
951 phy_write(lp->phy_dev, MII_BMCR, bmcr);
952
953 phy_start(lp->phy_dev);
954 }
955
659 /* Set the MAC address each time opened */ 956 /* Set the MAC address each time opened */
660 xemaclite_set_mac_address(lp, dev->dev_addr); 957 xemaclite_update_address(lp, dev->dev_addr);
661 958
662 /* Grab the IRQ */ 959 /* Grab the IRQ */
663 retval = request_irq(dev->irq, xemaclite_interrupt, 0, dev->name, dev); 960 retval = request_irq(dev->irq, xemaclite_interrupt, 0, dev->name, dev);
664 if (retval) { 961 if (retval) {
665 dev_err(&lp->ndev->dev, "Could not allocate interrupt %d\n", 962 dev_err(&lp->ndev->dev, "Could not allocate interrupt %d\n",
666 dev->irq); 963 dev->irq);
964 if (lp->phy_dev)
965 phy_disconnect(lp->phy_dev);
966 lp->phy_dev = NULL;
967
667 return retval; 968 return retval;
668 } 969 }
669 970
@@ -682,6 +983,7 @@ static int xemaclite_open(struct net_device *dev)
682 * 983 *
683 * This function stops the Tx queue, disables interrupts and frees the IRQ for 984 * This function stops the Tx queue, disables interrupts and frees the IRQ for
684 * the Emaclite device. 985 * the Emaclite device.
986 * It also disconnects the phy device associated with the Emaclite device.
685 */ 987 */
686static int xemaclite_close(struct net_device *dev) 988static int xemaclite_close(struct net_device *dev)
687{ 989{
@@ -691,6 +993,10 @@ static int xemaclite_close(struct net_device *dev)
691 xemaclite_disable_interrupts(lp); 993 xemaclite_disable_interrupts(lp);
692 free_irq(dev->irq, dev); 994 free_irq(dev->irq, dev);
693 995
996 if (lp->phy_dev)
997 phy_disconnect(lp->phy_dev);
998 lp->phy_dev = NULL;
999
694 return 0; 1000 return 0;
695} 1001}
696 1002
@@ -754,42 +1060,6 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
754} 1060}
755 1061
756/** 1062/**
757 * xemaclite_ioctl - Perform IO Control operations on the network device
758 * @dev: Pointer to the network device
759 * @rq: Pointer to the interface request structure
760 * @cmd: IOCTL command
761 *
762 * The only IOCTL operation supported by this function is setting the MAC
763 * address. An error is reported if any other operations are requested.
764 *
765 * Return: 0 to indicate success, or a negative error for failure.
766 */
767static int xemaclite_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
768{
769 struct net_local *lp = (struct net_local *) netdev_priv(dev);
770 struct hw_addr_data *hw_addr = (struct hw_addr_data *) &rq->ifr_hwaddr;
771
772 switch (cmd) {
773 case SIOCETHTOOL:
774 return -EIO;
775
776 case SIOCSIFHWADDR:
777 dev_err(&lp->ndev->dev, "SIOCSIFHWADDR\n");
778
779 /* Copy MAC address in from user space */
780 copy_from_user((void __force *) dev->dev_addr,
781 (void __user __force *) hw_addr,
782 IFHWADDRLEN);
783 xemaclite_set_mac_address(lp, dev->dev_addr);
784 break;
785 default:
786 return -EOPNOTSUPP;
787 }
788
789 return 0;
790}
791
792/**
793 * xemaclite_remove_ndev - Free the network device 1063 * xemaclite_remove_ndev - Free the network device
794 * @ndev: Pointer to the network device to be freed 1064 * @ndev: Pointer to the network device to be freed
795 * 1065 *
@@ -840,6 +1110,8 @@ static struct net_device_ops xemaclite_netdev_ops;
840 * This function probes for the Emaclite device in the device tree. 1110 * This function probes for the Emaclite device in the device tree.
841 * It initializes the driver data structure and the hardware, sets the MAC 1111 * It initializes the driver data structure and the hardware, sets the MAC
842 * address and registers the network device. 1112 * address and registers the network device.
1113 * It also registers a mii_bus for the Emaclite device, if MDIO is included
1114 * in the device.
843 * 1115 *
844 * Return: 0, if the driver is bound to the Emaclite device, or 1116 * Return: 0, if the driver is bound to the Emaclite device, or
845 * a negative error if there is failure. 1117 * a negative error if there is failure.
@@ -880,6 +1152,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
880 } 1152 }
881 1153
882 dev_set_drvdata(dev, ndev); 1154 dev_set_drvdata(dev, ndev);
1155 SET_NETDEV_DEV(ndev, &ofdev->dev);
883 1156
884 ndev->irq = r_irq.start; 1157 ndev->irq = r_irq.start;
885 ndev->mem_start = r_mem.start; 1158 ndev->mem_start = r_mem.start;
@@ -923,13 +1196,14 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
923 out_be32(lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET, 0); 1196 out_be32(lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET, 0);
924 1197
925 /* Set the MAC address in the EmacLite device */ 1198 /* Set the MAC address in the EmacLite device */
926 xemaclite_set_mac_address(lp, ndev->dev_addr); 1199 xemaclite_update_address(lp, ndev->dev_addr);
927 1200
928 dev_info(dev, 1201 lp->phy_node = of_parse_phandle(ofdev->node, "phy-handle", 0);
929 "MAC address is now %2x:%2x:%2x:%2x:%2x:%2x\n", 1202 rc = xemaclite_mdio_setup(lp, &ofdev->dev);
930 ndev->dev_addr[0], ndev->dev_addr[1], 1203 if (rc)
931 ndev->dev_addr[2], ndev->dev_addr[3], 1204 dev_warn(&ofdev->dev, "error registering MDIO bus\n");
932 ndev->dev_addr[4], ndev->dev_addr[5]); 1205
1206 dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);
933 1207
934 ndev->netdev_ops = &xemaclite_netdev_ops; 1208 ndev->netdev_ops = &xemaclite_netdev_ops;
935 ndev->flags &= ~IFF_MULTICAST; 1209 ndev->flags &= ~IFF_MULTICAST;
@@ -972,12 +1246,25 @@ static int __devexit xemaclite_of_remove(struct of_device *of_dev)
972 struct device *dev = &of_dev->dev; 1246 struct device *dev = &of_dev->dev;
973 struct net_device *ndev = dev_get_drvdata(dev); 1247 struct net_device *ndev = dev_get_drvdata(dev);
974 1248
1249 struct net_local *lp = (struct net_local *) netdev_priv(ndev);
1250
1251 /* Un-register the mii_bus, if configured */
1252 if (lp->has_mdio) {
1253 mdiobus_unregister(lp->mii_bus);
1254 kfree(lp->mii_bus->irq);
1255 mdiobus_free(lp->mii_bus);
1256 lp->mii_bus = NULL;
1257 }
1258
975 unregister_netdev(ndev); 1259 unregister_netdev(ndev);
976 1260
1261 if (lp->phy_node)
1262 of_node_put(lp->phy_node);
1263 lp->phy_node = NULL;
1264
977 release_mem_region(ndev->mem_start, ndev->mem_end-ndev->mem_start + 1); 1265 release_mem_region(ndev->mem_start, ndev->mem_end-ndev->mem_start + 1);
978 1266
979 xemaclite_remove_ndev(ndev); 1267 xemaclite_remove_ndev(ndev);
980
981 dev_set_drvdata(dev, NULL); 1268 dev_set_drvdata(dev, NULL);
982 1269
983 return 0; 1270 return 0;
@@ -987,7 +1274,7 @@ static struct net_device_ops xemaclite_netdev_ops = {
987 .ndo_open = xemaclite_open, 1274 .ndo_open = xemaclite_open,
988 .ndo_stop = xemaclite_close, 1275 .ndo_stop = xemaclite_close,
989 .ndo_start_xmit = xemaclite_send, 1276 .ndo_start_xmit = xemaclite_send,
990 .ndo_do_ioctl = xemaclite_ioctl, 1277 .ndo_set_mac_address = xemaclite_set_mac_address,
991 .ndo_tx_timeout = xemaclite_tx_timeout, 1278 .ndo_tx_timeout = xemaclite_tx_timeout,
992 .ndo_get_stats = xemaclite_get_stats, 1279 .ndo_get_stats = xemaclite_get_stats,
993}; 1280};
@@ -999,6 +1286,7 @@ static struct of_device_id xemaclite_of_match[] __devinitdata = {
999 { .compatible = "xlnx,xps-ethernetlite-1.00.a", }, 1286 { .compatible = "xlnx,xps-ethernetlite-1.00.a", },
1000 { .compatible = "xlnx,xps-ethernetlite-2.00.a", }, 1287 { .compatible = "xlnx,xps-ethernetlite-2.00.a", },
1001 { .compatible = "xlnx,xps-ethernetlite-2.01.a", }, 1288 { .compatible = "xlnx,xps-ethernetlite-2.01.a", },
1289 { .compatible = "xlnx,xps-ethernetlite-3.00.a", },
1002 { /* end of list */ }, 1290 { /* end of list */ },
1003}; 1291};
1004MODULE_DEVICE_TABLE(of, xemaclite_of_match); 1292MODULE_DEVICE_TABLE(of, xemaclite_of_match);
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 0f773a9a3ff2..7d4107f5eeb0 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -23,12 +23,12 @@
23 23
24*/ 24*/
25 25
26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27
26#define DRV_NAME "yellowfin" 28#define DRV_NAME "yellowfin"
27#define DRV_VERSION "2.1" 29#define DRV_VERSION "2.1"
28#define DRV_RELDATE "Sep 11, 2006" 30#define DRV_RELDATE "Sep 11, 2006"
29 31
30#define PFX DRV_NAME ": "
31
32/* The user-configurable values. 32/* The user-configurable values.
33 These may be modified when a driver module is loaded.*/ 33 These may be modified when a driver module is loaded.*/
34 34
@@ -237,7 +237,7 @@ static const struct pci_id_info pci_id_tbl[] = {
237 { } 237 { }
238}; 238};
239 239
240static const struct pci_device_id yellowfin_pci_tbl[] = { 240static DEFINE_PCI_DEVICE_TABLE(yellowfin_pci_tbl) = {
241 { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 241 { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
242 { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, 242 { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
243 { } 243 { }
@@ -399,7 +399,7 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev,
399 399
400 dev = alloc_etherdev(sizeof(*np)); 400 dev = alloc_etherdev(sizeof(*np));
401 if (!dev) { 401 if (!dev) {
402 printk (KERN_ERR PFX "cannot allocate ethernet device\n"); 402 pr_err("cannot allocate ethernet device\n");
403 return -ENOMEM; 403 return -ENOMEM;
404 } 404 }
405 SET_NETDEV_DEV(dev, &pdev->dev); 405 SET_NETDEV_DEV(dev, &pdev->dev);
@@ -487,10 +487,10 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev,
487 if (i) 487 if (i)
488 goto err_out_unmap_status; 488 goto err_out_unmap_status;
489 489
490 printk(KERN_INFO "%s: %s type %8x at %p, %pM, IRQ %d.\n", 490 netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n",
491 dev->name, pci_id_tbl[chip_idx].name, 491 pci_id_tbl[chip_idx].name,
492 ioread32(ioaddr + ChipRev), ioaddr, 492 ioread32(ioaddr + ChipRev), ioaddr,
493 dev->dev_addr, irq); 493 dev->dev_addr, irq);
494 494
495 if (np->drv_flags & HasMII) { 495 if (np->drv_flags & HasMII) {
496 int phy, phy_idx = 0; 496 int phy, phy_idx = 0;
@@ -499,9 +499,8 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev,
499 if (mii_status != 0xffff && mii_status != 0x0000) { 499 if (mii_status != 0xffff && mii_status != 0x0000) {
500 np->phys[phy_idx++] = phy; 500 np->phys[phy_idx++] = phy;
501 np->advertising = mdio_read(ioaddr, phy, 4); 501 np->advertising = mdio_read(ioaddr, phy, 4);
502 printk(KERN_INFO "%s: MII PHY found at address %d, status " 502 netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n",
503 "0x%4.4x advertising %4.4x.\n", 503 phy, mii_status, np->advertising);
504 dev->name, phy, mii_status, np->advertising);
505 } 504 }
506 } 505 }
507 np->mii_cnt = phy_idx; 506 np->mii_cnt = phy_idx;
@@ -584,8 +583,8 @@ static int yellowfin_open(struct net_device *dev)
584 return ret; 583 return ret;
585 584
586 if (yellowfin_debug > 1) 585 if (yellowfin_debug > 1)
587 printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n", 586 netdev_printk(KERN_DEBUG, dev, "%s() irq %d\n",
588 dev->name, dev->irq); 587 __func__, dev->irq);
589 588
590 ret = yellowfin_init_ring(dev); 589 ret = yellowfin_init_ring(dev);
591 if (ret) { 590 if (ret) {
@@ -642,8 +641,7 @@ static int yellowfin_open(struct net_device *dev)
642 iowrite32(0x80008000, ioaddr + TxCtrl); 641 iowrite32(0x80008000, ioaddr + TxCtrl);
643 642
644 if (yellowfin_debug > 2) { 643 if (yellowfin_debug > 2) {
645 printk(KERN_DEBUG "%s: Done yellowfin_open().\n", 644 netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__);
646 dev->name);
647 } 645 }
648 646
649 /* Set the timer to check for link beat. */ 647 /* Set the timer to check for link beat. */
@@ -664,8 +662,8 @@ static void yellowfin_timer(unsigned long data)
664 int next_tick = 60*HZ; 662 int next_tick = 60*HZ;
665 663
666 if (yellowfin_debug > 3) { 664 if (yellowfin_debug > 3) {
667 printk(KERN_DEBUG "%s: Yellowfin timer tick, status %8.8x.\n", 665 netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n",
668 dev->name, ioread16(ioaddr + IntrStatus)); 666 ioread16(ioaddr + IntrStatus));
669 } 667 }
670 668
671 if (yp->mii_cnt) { 669 if (yp->mii_cnt) {
@@ -673,9 +671,8 @@ static void yellowfin_timer(unsigned long data)
673 int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA); 671 int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
674 int negotiated = lpa & yp->advertising; 672 int negotiated = lpa & yp->advertising;
675 if (yellowfin_debug > 1) 673 if (yellowfin_debug > 1)
676 printk(KERN_DEBUG "%s: MII #%d status register is %4.4x, " 674 netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n",
677 "link partner capability %4.4x.\n", 675 yp->phys[0], bmsr, lpa);
678 dev->name, yp->phys[0], bmsr, lpa);
679 676
680 yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated); 677 yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
681 678
@@ -696,25 +693,24 @@ static void yellowfin_tx_timeout(struct net_device *dev)
696 struct yellowfin_private *yp = netdev_priv(dev); 693 struct yellowfin_private *yp = netdev_priv(dev);
697 void __iomem *ioaddr = yp->base; 694 void __iomem *ioaddr = yp->base;
698 695
699 printk(KERN_WARNING "%s: Yellowfin transmit timed out at %d/%d Tx " 696 netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n",
700 "status %4.4x, Rx status %4.4x, resetting...\n", 697 yp->cur_tx, yp->dirty_tx,
701 dev->name, yp->cur_tx, yp->dirty_tx, 698 ioread32(ioaddr + TxStatus),
702 ioread32(ioaddr + TxStatus), ioread32(ioaddr + RxStatus)); 699 ioread32(ioaddr + RxStatus));
703 700
704 /* Note: these should be KERN_DEBUG. */ 701 /* Note: these should be KERN_DEBUG. */
705 if (yellowfin_debug) { 702 if (yellowfin_debug) {
706 int i; 703 int i;
707 printk(KERN_WARNING " Rx ring %p: ", yp->rx_ring); 704 pr_warning(" Rx ring %p: ", yp->rx_ring);
708 for (i = 0; i < RX_RING_SIZE; i++) 705 for (i = 0; i < RX_RING_SIZE; i++)
709 printk(KERN_CONT " %8.8x", 706 pr_cont(" %08x", yp->rx_ring[i].result_status);
710 yp->rx_ring[i].result_status); 707 pr_cont("\n");
711 printk(KERN_CONT "\n"); 708 pr_warning(" Tx ring %p: ", yp->tx_ring);
712 printk(KERN_WARNING" Tx ring %p: ", yp->tx_ring);
713 for (i = 0; i < TX_RING_SIZE; i++) 709 for (i = 0; i < TX_RING_SIZE; i++)
714 printk(KERN_CONT " %4.4x /%8.8x", 710 pr_cont(" %04x /%08x",
715 yp->tx_status[i].tx_errs, 711 yp->tx_status[i].tx_errs,
716 yp->tx_ring[i].result_status); 712 yp->tx_ring[i].result_status);
717 printk(KERN_CONT "\n"); 713 pr_cont("\n");
718 } 714 }
719 715
720 /* If the hardware is found to hang regularly, we will update the code 716 /* If the hardware is found to hang regularly, we will update the code
@@ -891,8 +887,8 @@ static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
891 yp->tx_full = 1; 887 yp->tx_full = 1;
892 888
893 if (yellowfin_debug > 4) { 889 if (yellowfin_debug > 4) {
894 printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n", 890 netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n",
895 dev->name, yp->cur_tx, entry); 891 yp->cur_tx, entry);
896 } 892 }
897 return NETDEV_TX_OK; 893 return NETDEV_TX_OK;
898} 894}
@@ -916,8 +912,8 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
916 u16 intr_status = ioread16(ioaddr + IntrClear); 912 u16 intr_status = ioread16(ioaddr + IntrClear);
917 913
918 if (yellowfin_debug > 4) 914 if (yellowfin_debug > 4)
919 printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n", 915 netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n",
920 dev->name, intr_status); 916 intr_status);
921 917
922 if (intr_status == 0) 918 if (intr_status == 0)
923 break; 919 break;
@@ -963,13 +959,12 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
963 959
964#ifndef final_version 960#ifndef final_version
965 if (yellowfin_debug > 5) 961 if (yellowfin_debug > 5)
966 printk(KERN_DEBUG "%s: Tx queue %d check, Tx status " 962 netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n",
967 "%4.4x %4.4x %4.4x %4.4x.\n", 963 entry,
968 dev->name, entry, 964 yp->tx_status[entry].tx_cnt,
969 yp->tx_status[entry].tx_cnt, 965 yp->tx_status[entry].tx_errs,
970 yp->tx_status[entry].tx_errs, 966 yp->tx_status[entry].total_tx_cnt,
971 yp->tx_status[entry].total_tx_cnt, 967 yp->tx_status[entry].paused);
972 yp->tx_status[entry].paused);
973#endif 968#endif
974 if (tx_errs == 0) 969 if (tx_errs == 0)
975 break; /* It still hasn't been Txed */ 970 break; /* It still hasn't been Txed */
@@ -978,8 +973,8 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
978 /* There was an major error, log it. */ 973 /* There was an major error, log it. */
979#ifndef final_version 974#ifndef final_version
980 if (yellowfin_debug > 1) 975 if (yellowfin_debug > 1)
981 printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n", 976 netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n",
982 dev->name, tx_errs); 977 tx_errs);
983#endif 978#endif
984 dev->stats.tx_errors++; 979 dev->stats.tx_errors++;
985 if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++; 980 if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
@@ -989,8 +984,8 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
989 } else { 984 } else {
990#ifndef final_version 985#ifndef final_version
991 if (yellowfin_debug > 4) 986 if (yellowfin_debug > 4)
992 printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n", 987 netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n",
993 dev->name, tx_errs); 988 tx_errs);
994#endif 989#endif
995 dev->stats.tx_bytes += skb->len; 990 dev->stats.tx_bytes += skb->len;
996 dev->stats.collisions += tx_errs & 15; 991 dev->stats.collisions += tx_errs & 15;
@@ -1008,8 +1003,8 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
1008 1003
1009#ifndef final_version 1004#ifndef final_version
1010 if (yp->cur_tx - dirty_tx > TX_RING_SIZE) { 1005 if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
1011 printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n", 1006 netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
1012 dev->name, dirty_tx, yp->cur_tx, yp->tx_full); 1007 dirty_tx, yp->cur_tx, yp->tx_full);
1013 dirty_tx += TX_RING_SIZE; 1008 dirty_tx += TX_RING_SIZE;
1014 } 1009 }
1015#endif 1010#endif
@@ -1031,16 +1026,15 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
1031 yellowfin_error(dev, intr_status); 1026 yellowfin_error(dev, intr_status);
1032 1027
1033 if (--boguscnt < 0) { 1028 if (--boguscnt < 0) {
1034 printk(KERN_WARNING "%s: Too much work at interrupt, " 1029 netdev_warn(dev, "Too much work at interrupt, status=%#04x\n",
1035 "status=0x%4.4x.\n", 1030 intr_status);
1036 dev->name, intr_status);
1037 break; 1031 break;
1038 } 1032 }
1039 } while (1); 1033 } while (1);
1040 1034
1041 if (yellowfin_debug > 3) 1035 if (yellowfin_debug > 3)
1042 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", 1036 netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n",
1043 dev->name, ioread16(ioaddr + IntrStatus)); 1037 ioread16(ioaddr + IntrStatus));
1044 1038
1045 spin_unlock (&yp->lock); 1039 spin_unlock (&yp->lock);
1046 return IRQ_RETVAL(handled); 1040 return IRQ_RETVAL(handled);
@@ -1055,9 +1049,9 @@ static int yellowfin_rx(struct net_device *dev)
1055 int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx; 1049 int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
1056 1050
1057 if (yellowfin_debug > 4) { 1051 if (yellowfin_debug > 4) {
1058 printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %8.8x.\n", 1052 printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n",
1059 entry, yp->rx_ring[entry].result_status); 1053 entry, yp->rx_ring[entry].result_status);
1060 printk(KERN_DEBUG " #%d desc. %8.8x %8.8x %8.8x.\n", 1054 printk(KERN_DEBUG " #%d desc. %08x %08x %08x\n",
1061 entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr, 1055 entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
1062 yp->rx_ring[entry].result_status); 1056 yp->rx_ring[entry].result_status);
1063 } 1057 }
@@ -1081,20 +1075,20 @@ static int yellowfin_rx(struct net_device *dev)
1081 le32_to_cpu(desc->result_status)) & 0xffff; 1075 le32_to_cpu(desc->result_status)) & 0xffff;
1082 frame_status = get_unaligned_le16(&(buf_addr[data_size - 2])); 1076 frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
1083 if (yellowfin_debug > 4) 1077 if (yellowfin_debug > 4)
1084 printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n", 1078 printk(KERN_DEBUG " %s() status was %04x\n",
1085 frame_status); 1079 __func__, frame_status);
1086 if (--boguscnt < 0) 1080 if (--boguscnt < 0)
1087 break; 1081 break;
1088 if ( ! (desc_status & RX_EOP)) { 1082 if ( ! (desc_status & RX_EOP)) {
1089 if (data_size != 0) 1083 if (data_size != 0)
1090 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers," 1084 netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
1091 " status %4.4x, data_size %d!\n", dev->name, desc_status, data_size); 1085 desc_status, data_size);
1092 dev->stats.rx_length_errors++; 1086 dev->stats.rx_length_errors++;
1093 } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) { 1087 } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
1094 /* There was a error. */ 1088 /* There was a error. */
1095 if (yellowfin_debug > 3) 1089 if (yellowfin_debug > 3)
1096 printk(KERN_DEBUG " yellowfin_rx() Rx error was %4.4x.\n", 1090 printk(KERN_DEBUG " %s() Rx error was %04x\n",
1097 frame_status); 1091 __func__, frame_status);
1098 dev->stats.rx_errors++; 1092 dev->stats.rx_errors++;
1099 if (frame_status & 0x0060) dev->stats.rx_length_errors++; 1093 if (frame_status & 0x0060) dev->stats.rx_length_errors++;
1100 if (frame_status & 0x0008) dev->stats.rx_frame_errors++; 1094 if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
@@ -1118,8 +1112,8 @@ static int yellowfin_rx(struct net_device *dev)
1118 entry*sizeof(struct yellowfin_desc)), 1112 entry*sizeof(struct yellowfin_desc)),
1119 "\377\377\377\377\377\377", 6) != 0) { 1113 "\377\377\377\377\377\377", 6) != 0) {
1120 if (bogus_rx++ == 0) 1114 if (bogus_rx++ == 0)
1121 printk(KERN_WARNING "%s: Bad frame to %pM\n", 1115 netdev_warn(dev, "Bad frame to %pM\n",
1122 dev->name, buf_addr); 1116 buf_addr);
1123#endif 1117#endif
1124 } else { 1118 } else {
1125 struct sk_buff *skb; 1119 struct sk_buff *skb;
@@ -1129,9 +1123,8 @@ static int yellowfin_rx(struct net_device *dev)
1129 1123
1130#ifndef final_version 1124#ifndef final_version
1131 if (yellowfin_debug > 4) 1125 if (yellowfin_debug > 4)
1132 printk(KERN_DEBUG " yellowfin_rx() normal Rx pkt length %d" 1126 printk(KERN_DEBUG " %s() normal Rx pkt length %d of %d, bogus_cnt %d\n",
1133 " of %d, bogus_cnt %d.\n", 1127 __func__, pkt_len, data_size, boguscnt);
1134 pkt_len, data_size, boguscnt);
1135#endif 1128#endif
1136 /* Check if the packet is long enough to just pass up the skbuff 1129 /* Check if the packet is long enough to just pass up the skbuff
1137 without copying to a properly sized skbuff. */ 1130 without copying to a properly sized skbuff. */
@@ -1191,8 +1184,7 @@ static int yellowfin_rx(struct net_device *dev)
1191 1184
1192static void yellowfin_error(struct net_device *dev, int intr_status) 1185static void yellowfin_error(struct net_device *dev, int intr_status)
1193{ 1186{
1194 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n", 1187 netdev_err(dev, "Something Wicked happened! %04x\n", intr_status);
1195 dev->name, intr_status);
1196 /* Hmmmmm, it's not clear what to do here. */ 1188 /* Hmmmmm, it's not clear what to do here. */
1197 if (intr_status & (IntrTxPCIErr | IntrTxPCIFault)) 1189 if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1198 dev->stats.tx_errors++; 1190 dev->stats.tx_errors++;
@@ -1209,13 +1201,13 @@ static int yellowfin_close(struct net_device *dev)
1209 netif_stop_queue (dev); 1201 netif_stop_queue (dev);
1210 1202
1211 if (yellowfin_debug > 1) { 1203 if (yellowfin_debug > 1) {
1212 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x " 1204 netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n",
1213 "Rx %4.4x Int %2.2x.\n", 1205 ioread16(ioaddr + TxStatus),
1214 dev->name, ioread16(ioaddr + TxStatus), 1206 ioread16(ioaddr + RxStatus),
1215 ioread16(ioaddr + RxStatus), 1207 ioread16(ioaddr + IntrStatus));
1216 ioread16(ioaddr + IntrStatus)); 1208 netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n",
1217 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n", 1209 yp->cur_tx, yp->dirty_tx,
1218 dev->name, yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx); 1210 yp->cur_rx, yp->dirty_rx);
1219 } 1211 }
1220 1212
1221 /* Disable interrupts by clearing the interrupt mask. */ 1213 /* Disable interrupts by clearing the interrupt mask. */
@@ -1229,33 +1221,35 @@ static int yellowfin_close(struct net_device *dev)
1229 1221
1230#if defined(__i386__) 1222#if defined(__i386__)
1231 if (yellowfin_debug > 2) { 1223 if (yellowfin_debug > 2) {
1232 printk(KERN_DEBUG" Tx ring at %8.8llx:\n", 1224 printk(KERN_DEBUG " Tx ring at %08llx:\n",
1233 (unsigned long long)yp->tx_ring_dma); 1225 (unsigned long long)yp->tx_ring_dma);
1234 for (i = 0; i < TX_RING_SIZE*2; i++) 1226 for (i = 0; i < TX_RING_SIZE*2; i++)
1235 printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n", 1227 printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n",
1236 ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ', 1228 ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1237 i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr, 1229 i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1238 yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status); 1230 yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1239 printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status); 1231 printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
1240 for (i = 0; i < TX_RING_SIZE; i++) 1232 for (i = 0; i < TX_RING_SIZE; i++)
1241 printk(KERN_DEBUG " #%d status %4.4x %4.4x %4.4x %4.4x.\n", 1233 printk(KERN_DEBUG " #%d status %04x %04x %04x %04x\n",
1242 i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs, 1234 i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1243 yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused); 1235 yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1244 1236
1245 printk(KERN_DEBUG " Rx ring %8.8llx:\n", 1237 printk(KERN_DEBUG " Rx ring %08llx:\n",
1246 (unsigned long long)yp->rx_ring_dma); 1238 (unsigned long long)yp->rx_ring_dma);
1247 for (i = 0; i < RX_RING_SIZE; i++) { 1239 for (i = 0; i < RX_RING_SIZE; i++) {
1248 printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x\n", 1240 printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n",
1249 ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ', 1241 ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
1250 i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr, 1242 i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
1251 yp->rx_ring[i].result_status); 1243 yp->rx_ring[i].result_status);
1252 if (yellowfin_debug > 6) { 1244 if (yellowfin_debug > 6) {
1253 if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) { 1245 if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
1254 int j; 1246 int j;
1247
1248 printk(KERN_DEBUG);
1255 for (j = 0; j < 0x50; j++) 1249 for (j = 0; j < 0x50; j++)
1256 printk(" %4.4x", 1250 pr_cont(" %04x",
1257 get_unaligned(((u16*)yp->rx_ring[i].addr) + j)); 1251 get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
1258 printk("\n"); 1252 pr_cont("\n");
1259 } 1253 }
1260 } 1254 }
1261 } 1255 }
@@ -1281,8 +1275,8 @@ static int yellowfin_close(struct net_device *dev)
1281 1275
1282#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */ 1276#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
1283 if (yellowfin_debug > 0) { 1277 if (yellowfin_debug > 0) {
1284 printk(KERN_DEBUG "%s: Received %d frames that we should not have.\n", 1278 netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n",
1285 dev->name, bogus_rx); 1279 bogus_rx);
1286 } 1280 }
1287#endif 1281#endif
1288 1282
@@ -1301,16 +1295,17 @@ static void set_rx_mode(struct net_device *dev)
1301 iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg); 1295 iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
1302 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1296 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1303 iowrite16(0x000F, ioaddr + AddrMode); 1297 iowrite16(0x000F, ioaddr + AddrMode);
1304 } else if ((dev->mc_count > 64) || (dev->flags & IFF_ALLMULTI)) { 1298 } else if ((netdev_mc_count(dev) > 64) ||
1299 (dev->flags & IFF_ALLMULTI)) {
1305 /* Too many to filter well, or accept all multicasts. */ 1300 /* Too many to filter well, or accept all multicasts. */
1306 iowrite16(0x000B, ioaddr + AddrMode); 1301 iowrite16(0x000B, ioaddr + AddrMode);
1307 } else if (dev->mc_count > 0) { /* Must use the multicast hash table. */ 1302 } else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
1308 struct dev_mc_list *mclist; 1303 struct dev_mc_list *mclist;
1309 u16 hash_table[4]; 1304 u16 hash_table[4];
1310 int i; 1305 int i;
1306
1311 memset(hash_table, 0, sizeof(hash_table)); 1307 memset(hash_table, 0, sizeof(hash_table));
1312 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 1308 netdev_for_each_mc_addr(mclist, dev) {
1313 i++, mclist = mclist->next) {
1314 unsigned int bit; 1309 unsigned int bit;
1315 1310
1316 /* Due to a bug in the early chip versions, multiple filter 1311 /* Due to a bug in the early chip versions, multiple filter
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index bc5ae0f6e934..def49d2ec69a 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -313,7 +313,8 @@ static void znet_set_multicast_list (struct net_device *dev)
313 /* Byte D */ 313 /* Byte D */
314 cfblk->dummy_1 = 1; /* set to 1 */ 314 cfblk->dummy_1 = 1; /* set to 1 */
315 cfblk->tx_ifs_retrig = 3; /* Hmm... Disabled */ 315 cfblk->tx_ifs_retrig = 3; /* Hmm... Disabled */
316 cfblk->mc_all = (dev->mc_list || (dev->flags&IFF_ALLMULTI));/* multicast all mode */ 316 cfblk->mc_all = (!netdev_mc_empty(dev) ||
317 (dev->flags & IFF_ALLMULTI)); /* multicast all mode */
317 cfblk->rcv_mon = 0; /* Monitor mode disabled */ 318 cfblk->rcv_mon = 0; /* Monitor mode disabled */
318 cfblk->frag_acpt = 0; /* Do not accept fragments */ 319 cfblk->frag_acpt = 0; /* Do not accept fragments */
319 cfblk->tstrttrs = 0; /* No start transmission threshold */ 320 cfblk->tstrttrs = 0; /* No start transmission threshold */